hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16c419fdca7106c111bc212d4d4e57e97bd1d1d7 | 2,550 | py | Python | resources/python/dnac/01_dnac.py | wwt/curl-requests-foundations | 63429d82b4f3d11902c365c1ae7803e137c718e8 | ["Apache-2.0"] | null | null | null | resources/python/dnac/01_dnac.py | wwt/curl-requests-foundations | 63429d82b4f3d11902c365c1ae7803e137c718e8 | ["Apache-2.0"] | 2 | 2021-07-14T08:47:48.000Z | 2021-10-30T23:33:23.000Z | resources/python/dnac/01_dnac.py | wwt/curl-requests-foundations | 63429d82b4f3d11902c365c1ae7803e137c718e8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Step 1 - Authenticate & retrieve a session token
import requests
auth_url = 'https://sandboxdnac.cisco.com/dna/system/api/v1/auth/token'
headers = {'Content-Type':'application/json'}
auth_data = ('devnetuser', 'Cisco123!')
response = requests.post(auth_url, headers=headers, auth=auth_data)
print(response.json()) # display the session token
# Step 2 - Store the token in a variable
token = {'X-Auth-Token':response.json()['Token']}
headers.update(token)
print(headers) # display the updated headers
# Step 3 - Get a list of DNAC devices with the stored token
from pprint import pprint
get_url = 'https://sandboxdnac.cisco.com/dna/intent/api/v1/network-device'
get_response = requests.get(get_url, headers=headers)
pprint(get_response.json()) # display all devices
print() # blank line between output
pprint(get_response.json()['response'][0]) #display the first device in the response
# Step 4 - Run a 'show version' command on a device
cli_commands = {'name':'show ver','commands':['show ver'],'deviceUuids':['f16955ae-c349-47e9-8e8f-9b62104ab604']}
post_url = 'https://sandboxdnac.cisco.com/dna/intent/api/v1/network-device-poller/cli/read-request'
post_response = requests.post(post_url, headers=headers, json=cli_commands)
print(f'{post_response.status_code} {post_response.reason}')
print(post_response.json())
# Step 5 - If DNAC returns a '202 Accepted' for the previous task, check the status of the request and get the file ID
task_id = post_response.json()['response']['taskId']
get_url = f'https://sandboxdnac.cisco.com/dna/intent/api/v1/task/{task_id}'
get_response = requests.get(get_url, headers=headers)
print(f'{get_response.status_code} {get_response.reason}')
print(get_response.json())
file = get_response.json()['response']['progress']
# Step 6 - DNAC returns a string object which looks like a dictionary
# Remediation option #1 - use the JSON module to convert the string to a dictionary
import json
file = get_response.json()['response']['progress']
file_id_dict = json.loads(file)
file_id = file_id_dict['fileId']
# Remediation option #2 - use regex to parse the file ID
import re
search_pattern = re.compile(r'[a-f0-9-]+(?="})')
match = search_pattern.search(file)
file_id = match.group(0)
# Step 7 - If DNAC returns a '200 OK' for the previous command, get the file contents
get_url = f'https://sandboxdnac.cisco.com/dna/intent/api/v1/file/{file_id}'
get_response = requests.get(get_url, headers=headers)
print(f'{get_response.status_code} {get_response.reason}')
print(get_response.json())
| 45.535714 | 118 | 0.75451 |
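The script above sends the task query exactly once; in practice the CLI-runner task may still be running when it is first polled. A minimal polling sketch that reuses the same endpoints and headers; the attempt count and sleep interval are illustrative assumptions, not part of the original file:

import json
import time
import requests

def wait_for_task_file(task_id, headers, attempts=10, delay=2):
    # Poll the DNAC task endpoint until its 'progress' field carries a fileId.
    task_url = f'https://sandboxdnac.cisco.com/dna/intent/api/v1/task/{task_id}'
    for _ in range(attempts):
        progress = requests.get(task_url, headers=headers).json()['response']['progress']
        if 'fileId' in progress:
            # 'progress' is a JSON string, as noted in Step 6 above.
            return json.loads(progress)['fileId']
        time.sleep(delay)
    raise TimeoutError(f'task {task_id} did not finish after {attempts} polls')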
5865e1444a4b70865e5a58a5ba9893858d340003 | 124 | py | Python | web/settings/dev.py | koualsky/start | 544fde3a353f78a7a6d5782d240d98644731d6b0 | ["MIT"] | null | null | null | web/settings/dev.py | koualsky/start | 544fde3a353f78a7a6d5782d240d98644731d6b0 | ["MIT"] | null | null | null | web/settings/dev.py | koualsky/start | 544fde3a353f78a7a6d5782d240d98644731d6b0 | ["MIT"] | null | null | null |
from .base import *
STATIC_URL = '/static/'
STATIC_ROOT = '/code/static'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/code/media'
| 15.5 | 28 | 0.677419 |
53f15956fa97bfa6fe44a7a9988c274aac6f0f79 | 1,462 | py | Python | tests/test_link_annotation.py | abael/ScrapyPageFinder | 10a90bde78abc0649c7966a358593435c9c05738 | ["MIT"] | 31 | 2016-02-05T08:13:50.000Z | 2022-01-05T09:25:07.000Z | tests/test_link_annotation.py | abael/ScrapyPageFinder | 10a90bde78abc0649c7966a358593435c9c05738 | ["MIT"] | null | null | null | tests/test_link_annotation.py | abael/ScrapyPageFinder | 10a90bde78abc0649c7966a358593435c9c05738 | ["MIT"] | 12 | 2016-02-23T09:12:37.000Z | 2021-11-17T07:21:04.000Z |
import os.path
import page_finder
from util import extract_all_links
try:
FILE = __file__
except NameError:
FILE = './tests'
TESTDIR = os.getenv('TESTPATH',
os.path.dirname(os.path.realpath(FILE)))
def get_local_url(filename):
return 'file:///{0}/{1}'.format(os.path.join(TESTDIR, 'data'), filename)
def test_hnews():
link_annotation = page_finder.LinkAnnotation()
link_annotation.load(
extract_all_links(get_local_url('Hacker News 1.html')))
link_annotation.mark_link('https://news.ycombinator.com/news?p=2')
link_annotation.load(
extract_all_links(get_local_url('Hacker News 2.html')))
best = link_annotation.best_links_to_follow()
assert(best[0] == 'https://news.ycombinator.com/news?p=2')
assert(best[1] == 'https://news.ycombinator.com/news?p=3')
link_annotation.prune(100)
assert(len(link_annotation.links) <= 100)
assert(best[0] == 'https://news.ycombinator.com/news?p=2')
assert(best[1] == 'https://news.ycombinator.com/news?p=3')
def test_equal_distance():
link_annotation = page_finder.LinkAnnotation()
link_annotation.mark_link('http://page_1')
more_links = [
'http://page_2',
'http://page_3',
'http://page_4',
'http://page_5',
'http://page_6',
'http://page_7',
]
link_annotation.load(more_links)
for link in more_links:
assert link_annotation.is_follow_link(link)
| 27.074074 | 76 | 0.660055 |
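The test file above is also the clearest usage reference for page_finder's LinkAnnotation API. A minimal sketch of the same flow outside the test harness, assuming page_finder is installed and the link lists come from your own crawler (the example.com URLs are placeholders):

import page_finder

annotation = page_finder.LinkAnnotation()
annotation.load(['http://example.com/news?p=1', 'http://example.com/news?p=2'])  # links seen on page 1
annotation.mark_link('http://example.com/news?p=2')                              # the link that was followed
annotation.load(['http://example.com/news?p=2', 'http://example.com/news?p=3'])  # links seen on page 2
print(annotation.best_links_to_follow())  # pagination-style links rank first, as asserted in test_hnews
annotation.prune(100)                     # keep the internal link store bounded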
978d8b2fce4186f5907ce1c62af41a4ef23a3596 | 194 | py | Python | server/views/main/__init__.py | ojengwa/zapcore | f9eace7dc8ab4bc8bc3bb9c212ba43395e0459c1 | ["MIT"] | null | null | null | server/views/main/__init__.py | ojengwa/zapcore | f9eace7dc8ab4bc8bc3bb9c212ba43395e0459c1 | ["MIT"] | 3 | 2020-09-05T08:03:34.000Z | 2021-05-07T20:03:30.000Z | server/views/main/__init__.py | ojengwa/zapcore | f9eace7dc8ab4bc8bc3bb9c212ba43395e0459c1 | ["MIT"] | null | null | null |
from __future__ import absolute_import
from flask import Blueprint
from flask_cors import CORS
main = Blueprint(
'main',
__name__
)
# add CORS support
CORS(main)
from . import views
| 12.933333 | 38 | 0.747423 |
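For context, a blueprint like the one above only takes effect once it is registered on an application. A short sketch of that wiring, assuming the package import path follows the file path shown in this row (server/views/main/__init__.py):

from flask import Flask
from server.views.main import main  # the CORS-enabled blueprint defined above

app = Flask(__name__)
app.register_blueprint(main)  # activates the routes declared in views.py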
d70463f472e0d0fa870dbb54110e54072b119978 | 3,792 | py | Python | mudicom/lookup.py | neurosnap/mudicom | 04011967007409f0c5253b4f308f53a7b0fc99c6 | ["MIT"] | 32 | 2015-01-09T10:51:06.000Z | 2021-04-03T09:50:52.000Z | mudicom/lookup.py | neurosnap/mudicom | 04011967007409f0c5253b4f308f53a7b0fc99c6 | ["MIT"] | 3 | 2015-05-07T19:15:26.000Z | 2021-04-16T15:13:23.000Z | mudicom/lookup.py | neurosnap/mudicom | 04011967007409f0c5253b4f308f53a7b0fc99c6 | ["MIT"] | 18 | 2015-04-11T14:24:42.000Z | 2020-12-27T21:26:06.000Z |
# -*- coding: utf-8 -*-
"""
mudicom.lookup
~~~~~~~~~~~~~~
Basic dictionary lookup helper functions.
"""
def VR(VR=None, description=None):
""" Value Representation (VR) <-> Description lookup.
:param VR: Takes the VR and returns its description
:param description: Take the description of a VR and returns the VR
"""
value_repr = {
"AE": "Application Entity",
"AS": "Age String",
"AT": "Attribute Tag",
"CS": "Code String",
"DA": "Date",
"DS": "Decimal String",
"DT": "Date/Time",
"FL": "Floating Point Single (4 bytes)",
"FD": "Floating Point Double (8 bytes)",
"IS": "Integer String",
"LO": "Long String",
"LT": "Long Text",
"OB": "Other Byte",
"OF": "Other Float",
"OW": "Other Word",
"PN": "Person Name",
"SH": "Short String",
"SL": "Signed Long",
"SQ": "Sequence of Items",
"SS": "Signed Short",
"ST": "Short Text",
"TM": "Time",
"UI": "Unique Identifier",
"UL": "Unsigned Long",
"UN": "Unknown",
"US": "Unsigned Short",
"UT": "Unlimited Text"
}
assert VR or description, "Either VR or description required to map VR"
if VR is not None:
VR = VR.upper()
if VR in value_repr:
return value_repr[VR]
for key, value in value_repr.iteritems():
if description == value:
return key
return None
def transfer_syntax(UID=None, description=None):
""" Transfer Syntax UID <-> Description lookup.
:param UID: Transfer Syntax UID, returns description
:param description: Take the description of a transfer syntax and return its UID
"""
transfer_syntax = {
"1.2.840.10008.1.2": "Implicit VR Endian: Default Transfer Syntax for DICOM",
"1.2.840.10008.1.2.1": "Explicit VR Little Endian",
"1.2.840.10008.1.2.1.99": "Deflated Explicit VR Big Endian",
"1.2.840.10008.1.2.2": "Explicit VR Big Endian",
"1.2.840.10008.1.2.4.50": "JPEG Baseline (Process 1): Default Transfer Syntax for Lossy JPEG 8-bit Image Compression",
"1.2.840.10008.1.2.4.51": "JPEG Baseline (Processes 2 & 4): Default Transfer Syntax for Lossy JPEG 12-bit Image Compression (Process 4 only)",
"1.2.840.10008.1.2.4.57": "JPEG Lossless, Nonhierarchical (Processes 14)",
"1.2.840.10008.1.2.4.70": "JPEG Lossless, Nonhierarchical, First-Order Prediction (Processes 14 [Selection Value 1])",
"1.2.840.10008.1.2.4.80": "JPEG-LS Lossless Image Compression",
"1.2.840.10008.1.2.4.81": "JPEG-LS Lossy (Near- Lossless) Image Compression",
"1.2.840.10008.1.2.4.90": "JPEG 2000 Image Compression (Lossless Only)",
"1.2.840.10008.1.2.4.91": "JPEG 2000 Image Compression",
"1.2.840.10008.1.2.4.92": "JPEG 2000 Part 2 Multicomponent Image Compression (Lossless Only)",
"1.2.840.10008.1.2.4.93": "JPEG 2000 Part 2 Multicomponent Image Compression",
"1.2.840.10008.1.2.4.94": "JPIP Referenced",
"1.2.840.10008.1.2.4.95": "JPIP Referenced Deflate",
"1.2.840.10008.1.2.5": "RLE Lossless",
"1.2.840.10008.1.2.6.1": "RFC 2557 MIME Encapsulation",
"1.2.840.10008.1.2.4.100": "MPEG2 Main Profile Main Level",
"1.2.840.10008.1.2.4.102": "MPEG-4 AVC/H.264 High Profile / Level 4.1",
"1.2.840.10008.1.2.4.103": "MPEG-4 AVC/H.264 BD-compatible High Profile / Level 4.1"
}
assert UID or description, "Either Transfer syntax UID or description required"
if UID in transfer_syntax:
return transfer_syntax[UID]
for key, value in transfer_syntax.iteritems():
if description == value:
return key
return None
| 39.915789 | 150 | 0.593091 |
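lookup.py is a plain bidirectional dictionary lookup, so usage is a one-liner in each direction. A sketch assuming the package is importable as mudicom; note the module uses iteritems, so the description-to-key direction only runs under Python 2:

from mudicom import lookup

lookup.VR(VR='PN')                                               # -> 'Person Name'
lookup.VR(description='Date')                                    # -> 'DA' (reverse lookup)
lookup.transfer_syntax(UID='1.2.840.10008.1.2.5')                # -> 'RLE Lossless'
lookup.transfer_syntax(description='Explicit VR Little Endian')  # -> '1.2.840.10008.1.2.1'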
59372ad6157ec2e465098f30d8f920d8453839fc | 1,059 | py | Python | skp_edu_docker/code/master/workflow/dataconf/workflow_dataconf_image.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 8 | 2017-06-16T00:19:12.000Z | 2020-08-13T03:15:57.000Z | kict_edu_docker/code/master/workflow/dataconf/workflow_dataconf_image.py | TensorMSA/tensormsa_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 21 | 2017-06-09T10:15:14.000Z | 2018-03-29T07:51:02.000Z | skp_edu_docker/code/master/workflow/dataconf/workflow_dataconf_image.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 4 | 2017-10-25T09:59:53.000Z | 2020-05-07T09:51:11.000Z |
from master.workflow.dataconf.workflow_dataconf import WorkFlowDataConf
class WorkflowDataConfFrame(WorkFlowDataConf):
"""
"""
def get_view_obj(self):
"""
get column type info for view
:return:
"""
self._get_default_type()
self._get_modified_type()
return None
def set_view_obj(self, obj):
"""
set column type info on db json filed
:param obj:
:return:
"""
return None
def _get_lable_list(self):
"""
:return:
"""
return None
def _get_unlable_list(self):
"""
:return:
"""
return None
def _get_semi_rule(self):
"""
:return:
"""
return None
def _set_lable_list(self):
"""
:return:
"""
return None
def _set_unlable_list(self):
"""
:return:
"""
return None
def _set_semi_rule(self):
"""
:return:
"""
return None
| 16.045455 | 71 | 0.486308 |
5e7630a1be13ffcb0ad28e78039265530dee3bae | 1,172 | py | Python | custom_components/browser_mod/sensor.py | racelandshop/hass-browser_mod | 122c9df959336c1f3d09c79f1c977eb78147957a | ["MIT"] | 1 | 2021-12-22T23:21:37.000Z | 2021-12-22T23:21:37.000Z | custom_components/browser_mod/sensor.py | racelandshop/hass-browser_mod | 122c9df959336c1f3d09c79f1c977eb78147957a | ["MIT"] | null | null | null | custom_components/browser_mod/sensor.py | racelandshop/hass-browser_mod | 122c9df959336c1f3d09c79f1c977eb78147957a | ["MIT"] | null | null | null |
from datetime import datetime
from homeassistant.const import STATE_UNAVAILABLE
from .helpers import setup_platform, BrowserModEntity
PLATFORM = "sensor"
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
return setup_platform(hass, config, async_add_devices, PLATFORM, BrowserModSensor)
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_platform(hass, {}, async_add_entities)
class BrowserModSensor(BrowserModEntity):
domain = PLATFORM
def __init__(self, hass, connection, deviceID, alias=None):
super().__init__(hass, connection, deviceID, alias)
self.last_seen = None
def updated(self):
self.last_seen = datetime.now()
self.schedule_update_ha_state()
@property
def state(self):
if not self.connection.connection:
return STATE_UNAVAILABLE
return len(self.connection.connection)
@property
def extra_state_attributes(self):
return {
"type": "browser_mod",
"last_seen": self.last_seen,
"deviceID": self.deviceID,
**self.data,
}
| 27.255814 | 86 | 0.695392 |
4190572b1fdb76f146278e090e4bb694ed76a037 | 8,854 | py | Python | qstkutil/qsdateutil.py | elxavicio/QSTK | 4981506c37227a72404229d5e1e0887f797a5d57 | ["BSD-3-Clause"] | 55 | 2015-01-07T17:58:40.000Z | 2018-12-30T12:31:53.000Z | qstkutil/qsdateutil.py | elxavicio/QSTK | 4981506c37227a72404229d5e1e0887f797a5d57 | ["BSD-3-Clause"] | 3 | 2016-12-20T19:21:18.000Z | 2019-02-19T14:33:18.000Z | qstkutil/qsdateutil.py | elxavicio/QSTK | 4981506c37227a72404229d5e1e0887f797a5d57 | ["BSD-3-Clause"] | 38 | 2015-03-02T17:24:06.000Z | 2019-01-06T13:51:19.000Z |
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
__version__ = "$Revision: 295 $"
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
''' Caches dates '''
try:
filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
dates = []
for i in datestxt:
dates.append(dt.datetime.strptime(i,"%m/%d/%Y"))
return pd.TimeSeries(index=dates, data=dates)
GTS_DATES = _cache_dates()
def getMonthNames():
return(['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'])
def getYears(funds):
years=[]
for date in funds.index:
if(not(date.year in years)):
years.append(date.year)
return(years)
def getMonths(funds,year):
months=[]
for date in funds.index:
if((date.year==year) and not(date.month in months)):
months.append(date.month)
return(months)
def getDays(funds,year,month):
days=[]
for date in funds.index:
if((date.year==year) and (date.month==month)):
days.append(date)
return(days)
def getDaysBetween(ts_start, ts_end):
days=[]
for i in range(0,(ts_end-ts_start).days):
days.append(ts_start+timedelta(days=1)*i)
return(days)
def getFirstDay(funds,year,month):
for date in funds.index:
if((date.year==year) and (date.month==month)):
return(date)
return('ERROR')
def getLastDay(funds,year,month):
return_date = 'ERROR'
for date in funds.index:
if((date.year==year) and (date.month==month)):
return_date = date
return(return_date)
def getNextOptionClose(day, trade_days, offset=0):
#get third friday in month of day
#get first of month
year_off=0
if day.month+offset > 12:
year_off = 1
offset = offset - 12
first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
#get weekday
day_num = first.weekday()
#get first friday (friday - weekday) add 7 if less than 1
dif = 5 - day_num
if dif < 1:
dif = dif+7
#move to third friday
dif = dif + 14
friday = first+dt.timedelta(days=(dif-1))
#if friday is a holiday, options expire then
if friday in trade_days:
month_close = first + dt.timedelta(days=dif)
else:
month_close = friday
#if day is past the day after that
if month_close < day:
return_date = getNextOptionClose(day, trade_days, offset=1)
else:
return_date = month_close
return(return_date)
def getLastOptionClose(day, trade_days):
start = day
while getNextOptionClose(day, trade_days)>=start:
day= day - dt.timedelta(days=1)
return(getNextOptionClose(day, trade_days))
def getNYSEoffset(mark, offset):
''' Returns NYSE date offset by number of days '''
mark = mark.replace(hour=0, minute=0, second=0, microsecond=0)
i = GTS_DATES.index.searchsorted(mark, side='right')
# If there is no exact match, take first date in past
if GTS_DATES[i] != mark:
i -= 1
ret = GTS_DATES[i + offset]
ret = ret.replace(hour=16)
return ret
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
timeofday = dt.timedelta(0)):
"""
@summary: Create a list of timestamps between startday and endday (inclusive)
that correspond to the days there was trading at the NYSE. This function
depends on a separately created file that lists all days since July 4,
1962 that the NYSE has been open, going forward to 2020 (based
on the holidays that NYSE recognizes).
@param startday: First timestamp to consider (inclusive)
@param endday: Last day to consider (inclusive)
@return list: of timestamps between startday and endday on which NYSE traded
@rtype datetime
"""
start = startday - timeofday
end = endday - timeofday
dates = GTS_DATES[start:end]
ret = [x + timeofday for x in dates]
return(ret)
def getNextNNYSEdays(startday, days, timeofday):
"""
@summary: Create a list of timestamps from startday that is days days long
that correspond to the days there was trading at NYSE. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
"""
try:
filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
dates=[]
for i in datestxt:
if(len(dates)<days):
if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
return(dates)
def getPrevNNYSEday(startday, timeofday):
"""
@summary: This function returns the last valid trading day before the start
day, or returns the start day if it is a valid trading day. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
"""
try:
filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
#''' Set return to first day '''
dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
#''' Loop through all but first '''
for i in datestxt[1:]:
dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
#''' If we are > startday, then use previous valid day '''
if( dtNext > startday ):
break
dtReturn = dtNext + timeofday
return(dtReturn)
def ymd2epoch(year, month, day):
"""
@summary: Convert YMD info into a unix epoch value.
@param year: The year
@param month: The month
@param day: The day
@return epoch: number of seconds since epoch
"""
return(t.mktime(dt.date(year,month,day).timetuple()))
def epoch2date(ts):
"""
@summary Convert seconds since epoch into date
@param ts: Seconds since epoch
@return thedate: A date object
"""
tm = t.gmtime(ts)
return(dt.date(tm.tm_year,tm.tm_mon,tm.tm_mday))
def _trade_dates(dt_start, dt_end, s_period):
'''
@summary: Generate dates on which we need to trade
@param c_strat: Strategy config class
@param dt_start: Start date
@param dt_end: End date
'''
ldt_timestamps = getNYSEdays(dt_start,
dt_end, dt.timedelta(hours=16) )
# Use pandas reindex method instead
# Note, dates are index as well as values, we select based on index
# but return values since it is a numpy array of datetimes instead of
# pandas specific.
ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)
# These are the dates we want
if s_period[:2] == 'BW':
# special case for biweekly
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period[1:])
dr_range = np.asarray(dr_range)
li_even = np.array(range(len(dr_range)))
dr_range = dr_range[li_even[li_even % 2 == 0]]
else:
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period)
dr_range = np.asarray(dr_range)
# Warning, we MUST copy the date range, if we modify it it will be returned
# in it's modified form the next time we use it.
dr_range = np.copy(dr_range)
dr_range += pd.DateOffset(hours=16)
ts_dates = ts_dates.reindex( dr_range, method='bfill' )
ldt_dates = ts_dates[ts_dates.notnull()].values
#Make unique
sdt_unique = set()
ldt_dates = [x for x in ldt_dates
if x not in sdt_unique and not sdt_unique.add(x)]
return ldt_dates
| 30.958042 | 85 | 0.650102 |
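qsdateutil reads its trading calendar from NYSE_dates.txt located via the QS environment variable, and the module is Python 2 (print statements, pd.TimeSeries). A short usage sketch of the documented entry points under those assumptions:

import datetime as dt
import qstkutil.qsdateutil as du  # import path assumed from the file path above

# NYSE trading timestamps (16:00 close) for calendar year 2010
ldt_days = du.getNYSEdays(dt.datetime(2010, 1, 1), dt.datetime(2010, 12, 31),
                          dt.timedelta(hours=16))

# Next monthly options-expiration close on or after a given day
dt_expiry = du.getNextOptionClose(dt.datetime(2010, 1, 4), ldt_days)

print len(ldt_days), ldt_days[0], dt_expiry  # Python 2 print, matching the module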
356a59f9fdda7e5addb39b33e6e9a4e93199b097 | 24,826 | py | Python | src/container/kube-manager/kube_manager/vnc/vnc_pod.py | kaweue/contrail-controller | 66a8f1d13e2c28ddae6b5a5be6f068a03bea94e3 | ["Apache-2.0"] | null | null | null | src/container/kube-manager/kube_manager/vnc/vnc_pod.py | kaweue/contrail-controller | 66a8f1d13e2c28ddae6b5a5be6f068a03bea94e3 | ["Apache-2.0"] | null | null | null | src/container/kube-manager/kube_manager/vnc/vnc_pod.py | kaweue/contrail-controller | 66a8f1d13e2c28ddae6b5a5be6f068a03bea94e3 | ["Apache-2.0"] | null | null | null |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
VNC pod management for kubernetes
"""
import json
import uuid
from cStringIO import StringIO
from cfgm_common.exceptions import RefsExistError, NoIdError
from cfgm_common.utils import cgitb_hook
from vnc_api.vnc_api import (
InstanceIp, FloatingIp, FloatingIpPool, VirtualMachine,
VirtualMachineInterface, VirtualMachineInterfacePropertiesType,
SecurityGroup)
from kube_manager.vnc.config_db import (
DBBaseKM, VirtualNetworkKM, VirtualRouterKM, VirtualMachineKM,
VirtualMachineInterfaceKM, InstanceIpKM, FloatingIpKM, LoadbalancerKM,
TagKM)
from kube_manager.vnc.vnc_common import VncCommon
from kube_manager.common.kube_config_db import (NamespaceKM, PodKM)
from kube_manager.vnc.vnc_kubernetes_config import (
VncKubernetesConfig as vnc_kube_config)
from kube_manager.vnc.label_cache import XLabelCache
from cStringIO import StringIO
from cfgm_common.utils import cgitb_hook
class VncPod(VncCommon):
vnc_pod_instance = None
def __init__(self, service_mgr, network_policy_mgr):
super(VncPod, self).__init__('Pod')
self._name = type(self).__name__
self._vnc_lib = vnc_kube_config.vnc_lib()
self._label_cache = vnc_kube_config.label_cache()
self._labels = XLabelCache('Pod')
self._service_mgr = service_mgr
self._network_policy_mgr = network_policy_mgr
self._queue = vnc_kube_config.queue()
self._args = vnc_kube_config.args()
self._logger = vnc_kube_config.logger()
if not VncPod.vnc_pod_instance:
VncPod.vnc_pod_instance = self
def _set_label_to_pod_cache(self, new_labels, vm):
namespace_label = self._label_cache. \
_get_namespace_label(vm.pod_namespace)
new_labels.update(namespace_label)
for label in new_labels.items():
key = self._label_cache._get_key(label)
pod_label_cache = self._label_cache.pod_label_cache
self._label_cache._locate_label(key, pod_label_cache, label,
vm.uuid)
vm.pod_labels = new_labels
def _clear_label_to_pod_cache(self, vm):
if not vm.pod_labels:
return
for label in vm.pod_labels.items() or []:
key = self._label_cache._get_key(label)
pod_label_cache = self._label_cache.pod_label_cache
self._label_cache._remove_label(key, pod_label_cache, label,
vm.uuid)
vm.pod_labels = None
def _update_label_to_pod_cache(self, new_labels, vm):
self._clear_label_to_pod_cache(vm)
self._set_label_to_pod_cache(new_labels, vm)
def _get_network(self, pod_id, pod_name, pod_namespace):
"""
Get virtual network to be associated with the pod.
The heuristics to determine which virtual network to use for the pod
is as follows:
if (virtual network is annotated in the pod config):
Use virtual network configured on the pod.
else if (virtual network if annotated in the pod's namespace):
Use virtual network configured on the namespace.
else if (pod is in a isolated namespace):
Use the virtual network associated with isolated namespace.
else:
Use the pod virtual network associated with kubernetes cluster.
"""
# Check for virtual-network configured on the pod.
pod = PodKM.find_by_name_or_uuid(pod_id)
if not pod:
self._logger.notice("%s - Pod %s:%s:%s Not Found"
"(Might Got Delete Event From K8s)"
%(self._name, pod_namespace, pod_name, pod_id))
return
vn_fq_name = pod.get_vn_fq_name()
ns = self._get_namespace(pod_namespace)
# FIXME: Check if ns is not None
# Check of virtual network configured on the namespace.
if not vn_fq_name:
vn_fq_name = ns.get_annotated_network_fq_name()
# If the pod's namespace is isolated, use the isolated virtual
# network.
if not vn_fq_name:
if self._is_pod_network_isolated(pod_namespace):
vn_fq_name = ns.get_isolated_pod_network_fq_name()
# Finally, if no network was found, default to the cluster
# pod network.
if not vn_fq_name:
vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
return vn_obj
@staticmethod
def _get_namespace(pod_namespace):
return NamespaceKM.find_by_name_or_uuid(pod_namespace)
@staticmethod
def _get_namespace_labels(pod_namespace):
labels = {}
# Get the explicit labels on a pod.
ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
if ns and ns.labels:
labels = dict(ns.labels)
# Append the implicit namespace tag to a pod.
labels['namespace'] = pod_namespace
return labels
def _is_pod_network_isolated(self, pod_namespace):
return self._get_namespace(pod_namespace).is_isolated()
@staticmethod
def _is_pod_nested():
# Pod is nested if we are configured to run in nested mode.
return DBBaseKM.is_nested()
@staticmethod
def _get_host_ip(pod_name):
pod = PodKM.find_by_name_or_uuid(pod_name)
if pod:
return pod.get_host_ip()
return None
def _get_ip_fabric_forwarding(self, ns_name):
ns = self._get_namespace(ns_name)
if ns:
return ns.get_ip_fabric_forwarding()
return None
def _is_ip_fabric_forwarding_enabled(self, ns_name):
ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
if ip_fabric_forwarding != None:
return ip_fabric_forwarding
else:
return self._args.ip_fabric_forwarding
def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
# Instance-ip for pods are ALWAYS allocated from pod ipam on this
# VN. Get the subnet uuid of the pod ipam on this VN, so we can request
# an IP from it.
vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
if not vn:
# It is possible our cache may not have the VN yet. Locate it.
vn = VirtualNetworkKM.locate(vn_obj.get_uuid())
if self._is_pod_network_isolated(pod_namespace):
vn_namespace = pod_namespace
else:
vn_namespace = 'default'
if self._is_ip_fabric_forwarding_enabled(vn_namespace):
ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
else:
ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)
# Create instance-ip.
iip_uuid = str(uuid.uuid1())
iip_name = VncCommon.make_name(pod_name, iip_uuid)
iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
display_name=iip_name)
iip_obj.uuid = iip_uuid
iip_obj.add_virtual_network(vn_obj)
# Creation of iip requires the vmi vnc object.
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
fq_name=vmi.fq_name)
iip_obj.add_virtual_machine_interface(vmi_obj)
InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
" pod_ipam_subnet_uuid [%s]"
" vn [%s] vmi_fq_name [%s]" %\
(self._name, ipam_fq_name, pod_ipam_subnet_uuid,
vn.name, vmi.fq_name))
try:
self._vnc_lib.instance_ip_create(iip_obj)
except RefsExistError:
self._vnc_lib.instance_ip_update(iip_obj)
InstanceIpKM.locate(iip_obj.uuid)
return iip_obj
def _get_host_vmi(self, pod_name):
host_ip = self._get_host_ip(pod_name)
if host_ip:
net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
iip = InstanceIpKM.get_object(host_ip, net_fq_name)
if iip:
for vmi_id in iip.virtual_machine_interfaces:
vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
if vm_vmi and vm_vmi.host_id:
return vm_vmi
return None
@staticmethod
def _associate_security_groups(vmi_obj, proj_obj, ns):
sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
sg_obj = SecurityGroup(sg_name, proj_obj)
vmi_obj.add_security_group(sg_obj)
return
def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
parent_vmi):
proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
vmi_prop = None
if self._is_pod_nested() and parent_vmi:
# Pod is nested.
# Allocate a vlan-id for this pod from the vlan space managed
# in the VMI of the underlay VM.
parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
vlan_id = parent_vmi.alloc_vlan()
vmi_prop = VirtualMachineInterfacePropertiesType(
sub_interface_vlan_tag=vlan_id)
obj_uuid = str(uuid.uuid1())
name = VncCommon.make_name(pod_name, obj_uuid)
vmi_obj = VirtualMachineInterface(
name=name, parent_obj=proj_obj,
virtual_machine_interface_properties=vmi_prop,
display_name=name)
vmi_obj.uuid = obj_uuid
vmi_obj.set_virtual_network(vn_obj)
vmi_obj.set_virtual_machine(vm_obj)
self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
vmi_obj.port_security_enabled = True
VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace,
pod_name)
try:
vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
except RefsExistError:
vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)
VirtualMachineInterfaceKM.locate(vmi_uuid)
return vmi_uuid
def _create_vm(self, pod_namespace, pod_id, pod_name, labels):
vm_name = VncCommon.make_name(pod_name, pod_id)
display_name = VncCommon.make_display_name(pod_namespace, pod_name)
vm_obj = VirtualMachine(name=vm_name, display_name=display_name)
vm_obj.uuid = pod_id
VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace, pod_name,
k8s_uuid=str(pod_id),
labels=json.dumps(labels))
try:
self._vnc_lib.virtual_machine_create(vm_obj)
except RefsExistError:
vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
VirtualMachineKM.locate(vm_obj.uuid)
return vm_obj
def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
if node_ip is None:
return
vm = VirtualMachineKM.locate(vm_obj.uuid)
if vm:
vm.node_ip = node_ip
vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
if vr_uuid is None:
for vr in VirtualRouterKM.values():
if vr.name == pod_node:
vr_uuid = vr.uuid
if vr_uuid is None:
self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
%(self._name, node_ip, vm_obj.uuid))
return
try:
vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
except Exception as e:
self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
%(self._name, node_ip, vm_obj.uuid))
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self._logger.error("_link_vm_to_node: %s - %s" %(self._name, err_msg))
return
self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
'virtual-machine', vm_obj.uuid, None, 'ADD')
if vm:
vm.virtual_router = vrouter_obj.uuid
def _check_pod_uuid_change(self, pod_uuid, pod_name):
vm_fq_name = [pod_name]
vm_uuid = LoadbalancerKM.get_fq_name_to_uuid(vm_fq_name)
if vm_uuid != pod_uuid:
self.vnc_pod_delete(vm_uuid)
def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
vmi_obj_list = []
if not vmi_obj:
vm = VirtualMachineKM.get(pod_id)
if vm:
for vmi_id in list(vm.virtual_machine_interfaces):
vmi_obj_list.append(
self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
else:
vmi_obj_list.append(vmi_obj)
for vmi_obj in vmi_obj_list:
self._vnc_lib.set_tags(vmi_obj, self._labels.get_labels_dict(pod_id))
def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}):
vmi_obj_list = []
if not vmi_id:
vm = VirtualMachineKM.get(pod_id)
if vm:
for vmi_id in list(vm.virtual_machine_interfaces):
vmi_obj_list.append(self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
else:
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
vmi_obj_list.append(vmi_obj)
for vmi_obj in vmi_obj_list:
if not labels:
for k,v in self._labels.get_labels_dict(pod_id).iteritems():
self._vnc_lib.unset_tag(vmi_obj, k)
else:
for k,v in labels.iteritems():
self._vnc_lib.unset_tag(vmi_obj, k)
def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
labels, vm_vmi):
vm = VirtualMachineKM.get(pod_id)
if vm:
vm.pod_namespace = pod_namespace
if not vm.virtual_router:
self._link_vm_to_node(vm, pod_node, node_ip)
self._set_label_to_pod_cache(labels, vm)
# Update tags.
self._set_tags_on_pod_vmi(pod_id)
return vm
else:
self._check_pod_uuid_change(pod_id, pod_name)
vn_obj = self._get_network(pod_id, pod_name, pod_namespace)
if not vn_obj:
return
vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels)
vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
vm_vmi)
vmi = VirtualMachineInterfaceKM.get(vmi_uuid)
if self._is_pod_nested() and vm_vmi:
# Pod is nested.
# Link the pod VMI to the VMI of the underlay VM.
self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
'virtual-machine-interface', vmi_uuid,
None, 'ADD')
self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
'virtual-machine-interface', vm_vmi.uuid,
None, 'ADD')
# get host id for vm vmi
vr_uuid = None
for vr in VirtualRouterKM.values():
if vr.name == vm_vmi.host_id:
vr_uuid = vr.uuid
break
if not vr_uuid:
# Unable to determine VRouter for the parent VM.
#
# HACK ALERT
#
# It is possible that this is a case of FQDN mismatch between
# the host name associated with the VM and the host name
# associated with the corresponding vrouter. So try to look for
# vrouter again with a non-FQDN name.
#
# This needs to be removed when provisioning can guarantee that
# FQDN will be uniform across all config objects.
#
if '.' in vm_vmi.host_id:
# Host name on VM is a FQNAME. Ignore domain name.
host_id_prefix = vm_vmi.host_id.split('.')[0]
for vr in VirtualRouterKM.values():
if vr.name == host_id_prefix:
vr_uuid = vr.uuid
break
if not vr_uuid:
self._logger.error("No virtual-router object found for host: "
+ vm_vmi.host_id
+ ". Unable to add VM reference to a"
+ " valid virtual-router")
return
self._vnc_lib.ref_update('virtual-router', vr_uuid,
'virtual-machine', vm_obj.uuid, None,
'ADD')
self._create_iip(pod_name, pod_namespace, vn_obj, vmi)
if not self._is_pod_nested():
self._link_vm_to_node(vm_obj, pod_node, node_ip)
vm = VirtualMachineKM.locate(pod_id)
if vm:
vm.pod_namespace = pod_namespace
vm.pod_node = pod_node
vm.node_ip = node_ip
self._set_label_to_pod_cache(labels, vm)
self._set_tags_on_pod_vmi(pod_id)
return vm
def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node, node_ip, labels,
vm_vmi):
vm = VirtualMachineKM.get(pod_id)
if not vm:
# If the vm is not created yet, do so now.
vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
pod_node, node_ip, labels, vm_vmi)
if not vm:
return
vm.pod_namespace = pod_namespace
if not vm.virtual_router:
self._link_vm_to_node(vm, pod_node, node_ip)
self._update_label_to_pod_cache(labels, vm)
self._set_tags_on_pod_vmi(pod_id)
return vm
def vnc_port_delete(self, vmi_id, pod_id):
self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)
vmi = VirtualMachineInterfaceKM.get(vmi_id)
if not vmi:
return
for iip_id in list(vmi.instance_ips):
try:
self._vnc_lib.instance_ip_delete(id=iip_id)
except NoIdError:
pass
# Cleanup floating ip's on this interface.
for fip_id in list(vmi.floating_ips):
try:
self._vnc_lib.ref_update('floating-ip', fip_id,
'virtual-machine-interface', vmi_id, None,
'DELETE')
FloatingIpKM.update(fip_id)
except NoIdError:
pass
try:
self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
except NoIdError:
pass
VirtualMachineInterfaceKM.delete(vmi_id)
def vnc_pod_delete(self, pod_id):
vm = VirtualMachineKM.get(pod_id)
if not vm:
return
# If this VM's vrouter info is not available in our config db,
# then it is a case of race between delete and ref updates.
# So explicitly update this entry in config db.
if not vm.virtual_router:
try:
vm.update()
except NoIdError:
pass
self._clear_label_to_pod_cache(vm)
try:
vm_obj = self._vnc_lib.virtual_machine_read(id=vm.uuid)
except NoIdError:
# Unable to find VM object in cache. Cleanup local cache.
VirtualMachineKM.delete(vm.uuid)
return
if vm.virtual_router:
self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
'virtual-machine', vm.uuid, None,
'DELETE')
for vmi_id in list(vm.virtual_machine_interfaces):
self.vnc_port_delete(vmi_id, pod_id)
try:
self._vnc_lib.virtual_machine_delete(id=pod_id)
except NoIdError:
pass
# Cleanup local cache.
VirtualMachineKM.delete(pod_id)
def _create_pod_event(self, event_type, pod_id, vm_obj):
event = {}
object = {}
object['kind'] = 'Pod'
object['metadata'] = {}
object['metadata']['uid'] = pod_id
object['metadata']['labels'] = vm_obj.pod_labels
if event_type == 'delete':
event['type'] = 'DELETED'
event['object'] = object
self._queue.put(event)
return
def _sync_pod_vm(self):
vm_uuid_set = set(VirtualMachineKM.keys())
pod_uuid_set = set(PodKM.keys())
deleted_pod_set = vm_uuid_set - pod_uuid_set
for pod_uuid in deleted_pod_set:
vm = VirtualMachineKM.get(pod_uuid)
if not vm or\
vm.owner != 'k8s' or\
vm.cluster != vnc_kube_config.cluster_name():
continue
self._create_pod_event('delete', pod_uuid, vm)
for uuid in pod_uuid_set:
vm = VirtualMachineKM.get(uuid)
if not vm or\
vm.owner != 'k8s' or\
vm.cluster != vnc_kube_config.cluster_name():
continue
if not vm.virtual_router and vm.pod_node and vm.node_ip:
self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
return
def pod_timer(self):
self._sync_pod_vm()
return
def process(self, event):
event_type = event['type']
kind = event['object'].get('kind')
pod_namespace = event['object']['metadata'].get('namespace')
pod_name = event['object']['metadata'].get('name')
pod_id = event['object']['metadata'].get('uid')
labels = event['object']['metadata'].get('labels', {})
print("%s - Got %s %s %s:%s:%s"
%(self._name, event_type, kind, pod_namespace, pod_name, pod_id))
self._logger.debug("%s - Got %s %s %s:%s:%s"
%(self._name, event_type, kind, pod_namespace,
pod_name, pod_id))
if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
# Proceed ONLY if host network is specified.
pod_node = event['object']['spec'].get('nodeName')
node_ip = event['object']['status'].get('hostIP')
host_network = event['object']['spec'].get('hostNetwork')
if host_network:
return
# If the pod is nested, proceed ONLY if host vmi is found.
vm_vmi = None
if self._is_pod_nested():
vm_vmi = self._get_host_vmi(pod_name)
if not vm_vmi:
self._logger.debug(
"Nested Mode: Pod processing skipped. Unable to "
"determine host vmi for Pod[%s] Namespace[%s] "
"Event[%s] HostIP[%s])"
%(pod_name, pod_namespace, event_type,
self._get_host_ip(pod_name)))
return
# Add implicit namespace labels on this pod.
labels.update(self._get_namespace_labels(pod_namespace))
self._labels.process(pod_id, labels)
if event['type'] == 'ADDED':
vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
pod_node, node_ip, labels, vm_vmi)
else:
vm = self.vnc_pod_update(pod_id, pod_name,
pod_namespace, pod_node, node_ip, labels, vm_vmi)
elif event['type'] == 'DELETED':
self.vnc_pod_delete(pod_id)
self._labels.process(pod_id)
else:
self._logger.warning(
'Unknown event type: "{}" Ignoring'.format(event['type']))
@classmethod
def add_labels(cls, pod_id_list, labels):
if not cls.vnc_pod_instance:
return
for pod_id in pod_id_list:
cls.vnc_pod_instance._labels.append(pod_id, labels)
cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)
@classmethod
def remove_labels(cls, pod_id_list, labels):
if not cls.vnc_pod_instance:
return
for pod_id in pod_id_list:
cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels)
cls.vnc_pod_instance._labels.remove(pod_id, labels)
| 38.609642 | 96 | 0.596351 |
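VncPod.process() consumes Kubernetes watch events; the shape it expects can be read off the keys the method accesses. A reconstructed example event, with every value a placeholder:

event = {
    'type': 'ADDED',  # or 'MODIFIED' / 'DELETED'
    'object': {
        'kind': 'Pod',
        'metadata': {
            'namespace': 'default',
            'name': 'example-pod',
            'uid': '00000000-0000-0000-0000-000000000000',
            'labels': {'app': 'example'},
        },
        'spec': {'nodeName': 'node-1', 'hostNetwork': False},
        'status': {'hostIP': '10.0.0.5'},
    },
}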
8a2aabe2c1ef6825a04c8c1dc6f9eedf78a8c7c8 | 8,393 | py | Python | fisb/level0/utilities.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | ["BSD-2-Clause-Patent"] | 7 | 2021-05-29T13:12:20.000Z | 2021-12-26T02:38:34.000Z | fisb/level0/utilities.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | ["BSD-2-Clause-Patent"] | null | null | null | fisb/level0/utilities.py | rand-projects/fisb-decode | 870f6be8b7e7013fcba0c4f2f894aae425700563 | ["BSD-2-Clause-Patent"] | null | null | null |
"""Module containing level0 utility functions.
"""
import sys, os, time
from datetime import timezone, datetime
import fisb.level0.level0Config as cfg
# String for performing translation of dlac to text
#
# FIS-B uses a form of DLAC encoding. Some non-printing characters
# have been changed to printing characters (that do not otherwise
# appear in DLAC) as follows:
#
# 000000 ETX ~ (End of Text Marker)
# 011011 NC ~ (Null Character)
# 011100 TAB \t (Tab) However, what this really means is that the next
# six-bit character is the number of spaces
# to insert (1-64)
# 011101 RS ~ (Record Separator)
# 011111 CC | (Change Cypher) Not used in FIS-B for that purpose.
dlacString = "~ABCDEFGHIJKLMNOPQRSTUVWXYZ~\t~\n| !\"#$%&'()*+,-./0123456789:;<=>?"
triggerList = []
def textToDlac(str):
# Make sure string length is divisible by four (4 DLAC characters pack into 3 bytes).
while (len(str) % 4) != 0:
str = str + '~'
# Make upper case
str = str.upper()
# Get number of bytes this will turn into (4 characters in 3 bytes)
byteCount = int((len(str) / 4) * 3)
# Number of bytes to encode.
ba = bytearray(byteCount)
# Loop for each 4 characters, make 1 3-byte set.
baIdx = 0
strIdx = 0
for _ in range(0, int(byteCount / 3)):
c1 = dlacString.index(str[strIdx])
c2 = dlacString.index(str[strIdx + 1])
c3 = dlacString.index(str[strIdx + 2])
c4 = dlacString.index(str[strIdx + 3])
strIdx += 4
ba[baIdx] = (c1 << 2) | ((c2 & 0x30) >> 4)
ba[baIdx + 1] = ((c2 & 0x0F) << 4) | ((c3 & 0x3C) >> 2)
ba[baIdx + 2] = ((c3 & 0x3) << 6) | c4
baIdx += 3
return ba.hex()
def dlacToText(byteArray, startIndex, bytesToDecode):
"""Convert DLAC 6-bit string to text.
Given an index into a byte array (containing DLAC characters)
and the number of bits from the byte array (``bytesToDecode``), return
a text string.
``bytesToDecode`` is the number of bytes, not the number of DLAC characters.
Args:
byteArray (byte array): Byte array to extract the DLAC text from.
startIndex (int): Index into the byte array.
bytesToDecode (int): Number of bytes to use for the encoding.
Returns:
str: Text string encoded from the DLAC characters.
Will remove ETX, NC, and RS characters.
"""
text = ''
tab = False
for i in range(0, bytesToDecode):
m = i % 3
if m == 0:
j = (byteArray[startIndex + i] & 0xFC) >> 2
(text, tab) = addDlacChar(text, tab, j)
elif m == 1:
j = ((byteArray[startIndex + i - 1] & 0x03) << 4) + ((byteArray[startIndex + i] & 0xF0) >> 4)
(text, tab) = addDlacChar(text, tab, j)
else:
j = ((byteArray[startIndex + i - 1] & 0x0F) << 2) + ((byteArray[startIndex + i] & 0xC0) >> 6)
(text, tab) = addDlacChar(text, tab, j)
j = (byteArray[startIndex + i] & 0x3F)
(text, tab) = addDlacChar(text, tab, j)
return text.replace('~','')
# There are 3 forms of lat and long decoding, each with a different bit
# length. These contants are used as the 'bitFactor' argument in
# convertRawLongitudeLatitude
GEO_24_BITS = 360.0/(2**24)
GEO_19_BITS = 360.0/(2**19)
GEO_18_BITS = 360.0/(2**18)
def convertRawLongitudeLatitude(rawLongitude, rawLatitude, bitFactor):
"""Convert raw coordinates to standard ones.
Change native coordinates into normal longitude and latitude. The
numbers are truncated to 6 decimal places since that approximates
typical GPS coordinates.
Args:
rawLongitude (int): Longitude directly from data.
rawLatitude (int): Latitude directly from data.
bitFactor (float): Raw coordinates can be of different bit lengths.
This is the conversion factor: the correct one is GEO_xx_BITS, where
'xx' is the bit size of the raw data.
Returns:
tuple: Tuple of:
1. longitude
2. latitude
"""
longitude = rawLongitude * bitFactor
if longitude > 180:
longitude = longitude - 360.0
latitude = rawLatitude * bitFactor
if latitude > 90:
latitude = latitude - 180.0
# Attempt to preserve only 6 places after the decimal (akin
# to GPS precision)
longitude = float(round(longitude, 6))
latitude = float(round(latitude, 6))
return (longitude, latitude)
def addDlacChar(str, tab, chr):
"""Add a DLAC character to the supplied ``str``.
Tab characters in DLAC are actually a form of run-length encoding. The tab character is
followed by the number of spaces to add.
This is pretty much exclusively used by ``dlacToText()``.
Args:
str (str): Text string to add a character to.
tab (bool): Boolean value, which if true, means ``tab`` contains the
number of spaces to add (as opposed to adding the character ``chr``).
chr (byte): DLAC character to add. If ``tab`` is ``True``, this is the number of
spaces to add.
Returns:
tuple: Two values tuple return:
1. new string
2. new value of ``tab`` to be passed on the next
call to ``addDlacChar()``.
"""
if tab:
# Test groups only seem to use 4 bits rather than 6 for tab
if cfg.DLAC_4BIT_HACK:
chr = chr & 0xF
str += " " * chr
tab = False
elif chr == 28: #tab
tab = True
else:
str += dlacString[chr]
return (str, tab)
def createStationName(longitude, latitude):
"""Create station name from the station's longitude and latitude.
For various purposes (ex: CRLs) we need the station the message
came from. We also sometimes (for the standard) need to display
the longitude and latitude of the station. This function just
appends the latitude to the longitude separated by a ``~``.
It will work as a station ID and we can pull it apart for presenting
the longitude and latitude of the station.
Note that both the ``longitude`` and ``latitude`` arguments will have
their decimal points truncated to no more than 6.
Args:
longitude (float): Station's longitude
latitude (float): Station's latitude
Returns:
str: Station string containing latitude concatenated to
longitude, separated by a tilde (``~``).
"""
# Just append the lat and long using '~'. That way, station
# name can be used for coordinates (standard states you need
# to show the coordinates at times).
return str(latitude) + '~' + str(longitude)
def setTriggerList(trgrList):
"""Set the trigger list for testing.
Args:
trgrList (list): Trigger list to use. This is obtained from
:func:`db.harvest.testing.createTriggerList`. See that
function for the definition of list items.
"""
global triggerList
triggerList = trgrList
def checkForTrigger(utcSecs):
"""Check if any triggers have occurred before specified time.
Will print any triggers that have occurred before specified
time and remove them from the trigger list.
Args:
utcSecs (float): UTC time in seconds.
"""
itemsToDelete = 0
for triggerItems in triggerList:
if triggerItems[0] < utcSecs:
itemsToDelete += 1
printTrigger(triggerItems)
for _ in range(0, itemsToDelete):
triggerList.pop(0)
def printAllTriggers():
"""Print any remaining triggers.
Called at the end of a run to print any remaining triggers.
"""
for triggerItems in triggerList:
printTrigger(triggerItems)
def printTrigger(triggerItems):
"""Print a trigger item.
Prints information about a trigger on standard output.
Args:
triggerItem (list)
"""
dtTime = datetime.fromtimestamp(triggerItems[0], tz=timezone.utc)
timeStr = dtTime.__format__('%Y-%m-%dT%H:%M:%S') +\
'.{:03}Z'.format(int((triggerItems[0] % 1) * 1000))
x = '#===========================================================' + \
'\n# TRIGGER ({}): {} ({})\n#{}\n#'
print(x.format(triggerItems[1], timeStr, triggerItems[3], triggerItems[2]), flush=True)
| 32.785156 | 105 | 0.612892 |
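Because textToDlac and dlacToText are inverses, a round trip is the quickest sanity check. A sketch assuming the fisb package (including level0Config) is importable; textToDlac returns a hex string, so it is converted back to bytes before decoding:

from fisb.level0 import utilities as util

hex_str = util.textToDlac('TEST')                     # 4 DLAC characters pack into 3 bytes
text = util.dlacToText(bytes.fromhex(hex_str), 0, 3)  # start index 0, decode 3 bytes
print(hex_str, text)                                  # 5054d4 TEST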
c7b6aadc85cf553352f5850b574fccfbcf12b689 | 4,322 | py | Python | girder/molecules/molecules/models/experimental.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | ["BSD-3-Clause"] | 14 | 2015-05-04T16:40:48.000Z | 2021-07-13T08:00:30.000Z | girder/molecules/molecules/models/experimental.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | ["BSD-3-Clause"] | 88 | 2015-07-24T07:58:43.000Z | 2021-02-23T19:37:13.000Z | girder/molecules/molecules/models/experimental.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | ["BSD-3-Clause"] | 8 | 2015-06-12T20:54:39.000Z | 2021-04-09T01:07:15.000Z |
from jsonschema import validate, ValidationError
from girder.models.model_base import AccessControlledModel, ValidationException
from girder.constants import AccessType
class Experimental(AccessControlledModel):
'''
{
"spectrumType": "Infrared",
"experimentalTechnique": "InfraRed Multiphoton Dissociation - IRMPD",
"id": "[UO2(TMGA-R=CH2)3]2+",
"molecularFormula" : "C27H54N6O8U1",
"measuredSpectrum": {
"frequencies": {
"units": "cm-1"
"values": []
},
"intensities": {
"units": "arbitrary units",
"values": []
}
}
}
'''
schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'object',
'required': ['spectrumType', 'experimentalTechnique', 'id',
'molecularFormula', 'measuredSpectrum'],
'properties': {
'spectrumType': {
'type': 'string'
},
'experimentalTechnique': {
'type': 'string'
},
'id': {
'type': 'string'
},
'molecularFormula': {
'type': 'string'
},
'measuredSpectrum': {
'type': 'object',
'required': ['frequencies', 'intensities'],
'properties': {
'frequencies': {
'type': 'object',
'required': ['units', 'values'],
'properties': {
'units': {
'type': 'string'
},
'values': {
'type': 'array',
'items': {
'type': 'number'
}
}
}
},
'intensities': {
'type': 'object',
'required': ['units', 'values'],
'properties': {
'units': {
'type': 'string'
},
'values': {
'type': 'array',
'items': {
'type': 'number'
}
}
}
}
}
}
}
}
def __init__(self):
super(Experimental, self).__init__()
def initialize(self):
self.name = 'experimental'
self.ensureIndices(['molecularFormula'])
self.exposeFields(level=AccessType.READ, fields=(
'spectrumType', 'experimentalTechnique', 'id', '_id',
'molecularFormula', 'measuredSpectrum', 'name'))
def filter(self, calc, user):
calc = super(Experimental, self).filter(doc=calc, user=user)
del calc['_accessLevel']
del calc['_modelType']
return calc
def validate(self, doc):
try:
validate(doc, Experimental.schema)
except ValidationError as ex:
raise ValidationException(ex.message)
# Make sure arrays are same length
frequencies = doc['measuredSpectrum']['frequencies']
intensities = doc['measuredSpectrum']['intensities']
if len(frequencies) != len(intensities):
raise ValidationException('Array length must match')
return doc
def create(self, facility_used, spectrum_type, experimental_technique, id,
molecular_formula, measured_spectrum):
experiment = {
'facilityUsed': facility_used,
'spectrumType': spectrum_type,
'experimentalTechnique': experimental_technique,
'id': id,
'molecularFormula' : molecular_formula,
'measuredSpectrum' : measured_spectrum,
'name': '%s (%s)' % (experimental_technique, spectrum_type)
}
# For now set as public
self.setPublic(experiment, True)
return self.save(experiment)
| 31.779412 | 79 | 0.437298 |
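The class docstring above already sketches a conforming document; the same shape can be checked directly against Experimental.schema with jsonschema. The import path and the numeric values below are illustrative assumptions:

from jsonschema import validate
from molecules.models.experimental import Experimental  # path assumed from the file path above

doc = {
    'spectrumType': 'Infrared',
    'experimentalTechnique': 'InfraRed Multiphoton Dissociation - IRMPD',
    'id': '[UO2(TMGA-R=CH2)3]2+',
    'molecularFormula': 'C27H54N6O8U1',
    'measuredSpectrum': {
        'frequencies': {'units': 'cm-1', 'values': [799.5, 952.1]},
        'intensities': {'units': 'arbitrary units', 'values': [0.2, 1.0]},
    },
}
validate(doc, Experimental.schema)  # raises ValidationError if the shape is wrong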
7079408bfa9dcde8b9b4a47041d2a026a8840017 | 6,717 | py | Python | tools/execsnoop.py | cippaciong/bcc | aa4aa5211e6fe2b6e830bf511905ec8e3528449a | ["ECL-2.0", "Apache-2.0"] | null | null | null | tools/execsnoop.py | cippaciong/bcc | aa4aa5211e6fe2b6e830bf511905ec8e3528449a | ["ECL-2.0", "Apache-2.0"] | null | null | null | tools/execsnoop.py | cippaciong/bcc | aa4aa5211e6fe2b6e830bf511905ec8e3528449a | ["ECL-2.0", "Apache-2.0"] | null | null | null |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# execsnoop Trace new processes via exec() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: execsnoop [-h] [-t] [-x] [-n NAME]
#
# This currently will print up to a maximum of 19 arguments, plus the process
# name, so 20 fields in total (MAXARG).
#
# This won't catch all new processes: an application may fork() but not exec().
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from bcc.utils import ArgString, printb
import bcc.utils as utils
import argparse
import ctypes as ct
import re
import time
from collections import defaultdict
# arguments
examples = """examples:
./execsnoop # trace all exec() syscalls
./execsnoop -x # include failed exec()s
./execsnoop -t # include timestamps
./execsnoop -q # add "quotemarks" around arguments
./execsnoop -n main # only print command lines containing "main"
./execsnoop -l tpkg # only print command where arguments contains "tpkg"
"""
parser = argparse.ArgumentParser(
description="Trace exec() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--fails", action="store_true",
help="include failed exec()s")
parser.add_argument("-q", "--quote", action="store_true",
help="Add quotemarks (\") around arguments."
)
parser.add_argument("-n", "--name",
type=ArgString,
help="only print commands matching this name (regex), any arg")
parser.add_argument("-l", "--line",
type=ArgString,
help="only print commands where arg contains this line (regex)")
parser.add_argument("--max-args", default="20",
help="maximum number of arguments parsed and displayed, defaults to 20")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define ARGSIZE 128
enum event_type {
EVENT_ARG,
EVENT_RET,
};
struct data_t {
u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
char comm[TASK_COMM_LEN];
enum event_type type;
char argv[ARGSIZE];
int retval;
};
BPF_PERF_OUTPUT(events);
static int __submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
bpf_probe_read(data->argv, sizeof(data->argv), ptr);
events.perf_submit(ctx, data, sizeof(struct data_t));
return 1;
}
static int submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
const char *argp = NULL;
bpf_probe_read(&argp, sizeof(argp), ptr);
if (argp) {
return __submit_arg(ctx, (void *)(argp), data);
}
return 0;
}
int do_sys_execve(struct pt_regs *ctx,
const char __user *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
// create data here and pass to submit_arg to save stack space (#555)
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_ARG;
__submit_arg(ctx, (void *)filename, &data);
// skip first arg, as we submitted filename
#pragma unroll
for (int i = 1; i < MAXARG; i++) {
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0)
goto out;
}
// handle truncated argument list
char ellipsis[] = "...";
__submit_arg(ctx, (void *)ellipsis, &data);
out:
return 0;
}
int do_ret_sys_execve(struct pt_regs *ctx)
{
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_RET;
data.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
bpf_text = bpf_text.replace("MAXARG", args.max_args)
if args.ebpf:
print(bpf_text)
exit()
# initialize BPF
b = BPF(text=bpf_text)
execve_fnname = b.get_syscall_fnname("execve")
b.attach_kprobe(event=execve_fnname, fn_name="do_sys_execve")
b.attach_kretprobe(event=execve_fnname, fn_name="do_ret_sys_execve")
# header
if args.timestamp:
print("%-8s" % ("TIME(s)"), end="")
print("%-16s %-6s %-6s %3s %s" % ("PCOMM", "PID", "PPID", "RET", "ARGS"))
TASK_COMM_LEN = 16 # linux/sched.h
ARGSIZE = 128 # should match #define in C above
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_uint),
("comm", ct.c_char * TASK_COMM_LEN),
("type", ct.c_int),
("argv", ct.c_char * ARGSIZE),
("retval", ct.c_int),
]
class EventType(object):
EVENT_ARG = 0
EVENT_RET = 1
start_ts = time.time()
argv = defaultdict(list)
# TODO: This is best-effort PPID matching. Short-lived processes may exit
# before we get a chance to read the PPID. This should be replaced with
# fetching PPID via C when available (#364).
def get_ppid(pid):
try:
with open("/proc/%d/status" % pid) as status:
for line in status:
if line.startswith("PPid:"):
return int(line.split()[1])
except IOError:
pass
return 0
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
skip = False
if event.type == EventType.EVENT_ARG:
argv[event.pid].append(event.argv)
elif event.type == EventType.EVENT_RET:
if event.retval != 0 and not args.fails:
skip = True
if args.name and not re.search(bytes(args.name), event.comm):
skip = True
if args.line and not re.search(bytes(args.line),
b' '.join(argv[event.pid])):
skip = True
if args.quote:
argv[event.pid] = [
"\"" + arg.replace("\"", "\\\"") + "\""
for arg in argv[event.pid]
]
if not skip:
if args.timestamp:
print("%-8.3f" % (time.time() - start_ts), end="")
ppid = get_ppid(event.pid)
ppid = b"%d" % ppid if ppid > 0 else b"?"
printb(b"%-16s %-6d %-6s %3d %s" % (event.comm, event.pid,
ppid, event.retval, b' '.join(argv[event.pid])))
try:
del(argv[event.pid])
except Exception:
pass
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
b.perf_buffer_poll()
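# Editor's note: a minimal, BCC-free sketch of the userspace accumulation
# pattern used above -- EVENT_ARG rows append argv fragments per PID and
# EVENT_RET flushes and prints them. The sample events are hypothetical and
# nothing here touches eBPF; it is placed after the (non-returning) poll loop
# purely as an illustration.
def _argv_accumulation_demo():
    from collections import defaultdict
    EVENT_ARG, EVENT_RET = 0, 1
    buf = defaultdict(list)
    events = [
        {"type": EVENT_ARG, "pid": 42, "argv": "ls", "comm": "bash", "retval": 0},
        {"type": EVENT_ARG, "pid": 42, "argv": "-l", "comm": "bash", "retval": 0},
        {"type": EVENT_RET, "pid": 42, "argv": "", "comm": "ls", "retval": 0},
    ]
    for ev in events:
        if ev["type"] == EVENT_ARG:
            buf[ev["pid"]].append(ev["argv"])
        elif ev["type"] == EVENT_RET:
            print("%-16s %-6d %3d %s" % (ev["comm"], ev["pid"], ev["retval"],
                                         " ".join(buf.pop(ev["pid"], []))))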
| 29.460526
| 79
| 0.63183
|
1f18d0ea075bee82b9a678b45f7c59af0ef0e8d1
| 1,137
|
py
|
Python
|
tests/test_instance_wrapper.py
|
CodeYellowBV/django-tally
|
a705821050da912fb8dabd56c41c040ea0a00a21
|
[
"MIT"
] | null | null | null |
tests/test_instance_wrapper.py
|
CodeYellowBV/django-tally
|
a705821050da912fb8dabd56c41c040ea0a00a21
|
[
"MIT"
] | null | null | null |
tests/test_instance_wrapper.py
|
CodeYellowBV/django-tally
|
a705821050da912fb8dabd56c41c040ea0a00a21
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django_tally.user_def.instance_wrapper import InstanceWrapper
from django_tally.user_def.lang import KW
from .testapp.models import Foo, Baz
class InstanceWrapperTest(TestCase):
    def test_instance_wrapper(self):
foo = Foo(value=5)
foo.save()
baz = Baz(foo=foo)
baz.save()
wrapped_baz = InstanceWrapper(baz)
self.assertEqual(wrapped_baz[KW('__class__')], 'Baz')
self.assertEqual(wrapped_baz[KW('id')], baz.id)
self.assertEqual(wrapped_baz[KW('file')], None)
with self.assertRaises(KeyError):
wrapped_baz[KW('foobar')]
wrapped_foo = wrapped_baz[KW('foo')]
self.assertEqual(wrapped_foo[KW('__class__')], 'Foo')
self.assertEqual(wrapped_foo[KW('value')], 5)
wrapped_bazs = wrapped_foo[KW('bazs')]
self.assertEqual(len(wrapped_bazs), 1)
self.assertEqual(
wrapped_bazs[0][KW('__class__')],
wrapped_baz[KW('__class__')],
)
self.assertEqual(
wrapped_bazs[0][KW('id')],
wrapped_baz[KW('id')],
)
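# Editor's note: a rough, self-contained sketch of the lookup behaviour the
# test above exercises. This is NOT django_tally's InstanceWrapper -- it is a
# plain dict-backed stand-in using string keys instead of KW keywords.
class _MiniWrapper:
    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        # Unknown attributes raise KeyError, mirroring the assertRaises above.
        return self._data[key]

def _mini_wrapper_demo():
    wrapped = _MiniWrapper({'__class__': 'Baz', 'id': 1, 'file': None})
    assert wrapped['__class__'] == 'Baz'
    assert wrapped['file'] is None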
| 30.72973
| 66
| 0.621812
|
46da81be280601cd761b431f4abf68df386ec9cd
| 438
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scatter/marker/line/_reversescale.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/scatter/marker/line/_reversescale.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/scatter/marker/line/_reversescale.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="reversescale", parent_name="scatter.marker.line", **kwargs
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
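# Editor's note: hedged usage sketch. Plotly validators inherit a
# validate_coerce() method from the _plotly_utils base classes; the call
# below assumes that API and a local plotly install, and simply validates
# a boolean value.
def _reversescale_validator_demo():
    v = ReversescaleValidator()
    return v.validate_coerce(True)   # expected to return True unchanged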
| 31.285714
| 85
| 0.66895
|
7162dee7d39141585fac04a0bb550dae57226160
| 7,638
|
py
|
Python
|
datasets/ollie/ollie.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | 9
|
2021-04-26T14:43:52.000Z
|
2021-11-08T09:47:24.000Z
|
datasets/ollie/ollie.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | null | null | null |
datasets/ollie/ollie.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | 1
|
2022-02-28T18:08:09.000Z
|
2022-02-28T18:08:09.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ollie"""
from __future__ import absolute_import, division, print_function
import bz2
import datasets
_CITATION = """\
@inproceedings{ollie-emnlp12,
author = {Mausam and Michael Schmitz and Robert Bart and Stephen Soderland and Oren Etzioni},
title = {Open Language Learning for Information Extraction},
booktitle = {Proceedings of Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CONLL)},
year = {2012}
}"""
_DESCRIPTION = """The Ollie dataset includes two configs for the data
used to train the Ollie informatation extraction algorithm, for 18M
sentences and 3M sentences respectively.
This data is for academic use only. From the authors:
Ollie is a program that automatically identifies and extracts binary
relationships from English sentences. Ollie is designed for Web-scale
information extraction, where target relations are not specified in
advance.
Ollie is our second-generation information extraction system. Whereas
ReVerb operates on flat sequences of tokens, Ollie works with the
tree-like (graph with only small cycles) representation using
Stanford's compression of the dependencies. This allows Ollie to
capture expression that ReVerb misses, such as long-range relations.
Ollie also captures context that modifies a binary relation. Presently
Ollie handles attribution (He said/she believes) and enabling
conditions (if X then).
More information is available at the Ollie homepage:
https://knowitall.github.io/ollie/
"""
_LICENSE = """The University of Washington acamdemic license:
https://raw.githubusercontent.com/knowitall/ollie/master/LICENSE
"""
_URLs = {
"ollie_lemmagrep": "http://knowitall.cs.washington.edu/ollie/data/lemmagrep.txt.bz2",
"ollie_patterned": "http://knowitall.cs.washington.edu/ollie/data/patterned-all.txt.bz2",
}
class Ollie(datasets.GeneratorBasedBuilder):
"""Ollie dataset for knowledge bases and knowledge graphs and underlying sentences."""
VERSION = datasets.Version("0.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="ollie_lemmagrep", description="The Ollie training data", version="1.1.0"),
datasets.BuilderConfig(
name="ollie_patterned", description="The Ollie data used in the Ollie paper.", version="1.1.0"
),
]
DEFAULT_CONFIG_NAME = "ollie_lemmagrep"
def _info(self):
if self.config.name == "ollie_lemmagrep":
features = datasets.Features(
{
"arg1": datasets.Value("string"),
"arg2": datasets.Value("string"),
"rel": datasets.Value("string"),
"search_query": datasets.Value("string"),
"sentence": datasets.Value("string"),
"words": datasets.Value("string"),
"pos": datasets.Value("string"),
"chunk": datasets.Value("string"),
"sentence_cnt": datasets.Value("string"),
}
)
else:
features = datasets.Features(
{
"rel": datasets.Value("string"),
"arg1": datasets.Value("string"),
"arg2": datasets.Value("string"),
"slot0": datasets.Value("string"),
"search_query": datasets.Value("string"),
"pattern": datasets.Value("string"),
"sentence": datasets.Value("string"),
"parse": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage="https://knowitall.github.io/ollie/",
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": data_dir,
"split": "train",
},
),
]
def _generate_examples(self, filepath, split):
""" Yields examples from the Ollie predicates and sentences. """
with bz2.open(filepath, "rt") as f:
id_ = -1
if self.config.name == "ollie_lemmagrep":
for row in f:
row = row.strip().split("\t")
id_ += 1
if len(row) == 8:
yield id_, {
"arg1": row[0].strip(),
"arg2": row[1].strip(),
"rel": "",
"search_query": row[2].strip(),
"sentence": row[3].strip(),
"words": row[4].strip(),
"pos": row[5].strip(),
"chunk": row[6].strip(),
"sentence_cnt": row[7].strip(),
}
else:
yield id_, {
"arg1": row[1].strip(),
"arg2": row[2].strip(),
"rel": row[0].strip(),
"search_query": row[3].strip(),
"sentence": row[4].strip(),
"words": row[5].strip(),
"pos": row[6].strip(),
"chunk": row[7].strip(),
"sentence_cnt": row[8].strip(),
}
else:
for row in f:
row = row.strip().split("\t")
id_ += 1
if len(row) == 7:
yield id_, {
"rel": row[0].strip(),
"arg1": row[1].strip(),
"arg2": row[2].strip(),
"slot0": "",
"search_query": row[3].strip(),
"pattern": row[4].strip(),
"sentence": row[5].strip(),
"parse": row[6].strip(),
}
else:
yield id_, {
"rel": row[0].strip(),
"arg1": row[1].strip(),
"arg2": row[2].strip(),
"slot0": row[7].strip(),
"search_query": row[3].strip(),
"pattern": row[4].strip(),
"sentence": row[5].strip(),
"parse": row[6].strip(),
}
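# Editor's note: hedged usage sketch. Assumes the HuggingFace `datasets`
# library (a version that still accepts local builder scripts) and that this
# script is saved locally as ollie.py; loading the config triggers a large
# download of the bz2 source file.
def _load_ollie_demo():
    from datasets import load_dataset
    ds = load_dataset("./ollie.py", "ollie_lemmagrep", split="train")
    return ds[0]["arg1"], ds[0]["rel"], ds[0]["arg2"]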
| 39.57513
| 152
| 0.520817
|
24c8b9883ddbdae5af6ad44d4af85ff58f9bf090
| 1,187
|
py
|
Python
|
iadmin/templatetags/iadmin_urls.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | 1
|
2015-06-23T09:24:12.000Z
|
2015-06-23T09:24:12.000Z
|
iadmin/templatetags/iadmin_urls.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
iadmin/templatetags/iadmin_urls.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
from django.core.urlresolvers import reverse, NoReverseMatch, resolve
from django import template
from django.template.base import Template
from django.template.loader import render_to_string, find_template, get_template
register = template.Library()
@register.filter
def admin_urlname(opts, arg):
return 'admin:%s_%s_%s' % (opts.app_label, opts.module_name, arg)
@register.simple_tag(takes_context=True)
def admin_url(context, name, *args, **kwargs):
return reverse('%s:%s' % (context.get('current_app','iadmin'), name), args=args, kwargs=kwargs, current_app=context.get('current_app','iadmin'))
@register.simple_tag(takes_context=True)
def admin_model_url(context, model, name):
return reverse('%s:%s_%s_%s' % (context.get('current_app','iadmin'), model._meta.app_label, model._meta.module_name, name))
@register.simple_tag(takes_context=True)
def iinclude(context, filename):
""" like standard templatetag `include` but allow to use context variables
into the filename
{% iinclude '{{myvar}}/path/to/filename' %}
"""
real_filename = Template(filename).render(context)
return get_template(real_filename).render(context)
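# Editor's note: a small sketch of what the admin_urlname filter produces,
# using a namedtuple stand-in for a model's _meta options (in a template the
# real argument would be the changelist's {{ opts }}).
def _admin_urlname_demo():
    from collections import namedtuple
    Opts = namedtuple('Opts', ['app_label', 'module_name'])
    return admin_urlname(Opts('shop', 'product'), 'change')
    # -> 'admin:shop_product_change'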
| 23.27451
| 148
| 0.738837
|
a3d5f83579039a91f4e92b2e687f17cbacc944e7
| 299
|
py
|
Python
|
saleor/product/migrations/0151_merge_20211022_0935.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/product/migrations/0151_merge_20211022_0935.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/product/migrations/0151_merge_20211022_0935.py
|
wuchujiecode/saleor
|
c2ee650e11b1dde6744be7c46e28262318ae4ac9
|
[
"CC-BY-4.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-10-22 09:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0149_rename_detail_product_overview'),
('product', '0150_collection_collection_search_gin'),
]
operations = [
]
| 19.933333
| 61
| 0.67893
|
1413af751cf7bd04cdde734fd8967e2977b19dfb
| 2,334
|
py
|
Python
|
ROAR/agent_module/jAM2Agent.py
|
mavabene/ROAR
|
51955293ad87128ec2c549142abf91d45314f380
|
[
"Apache-2.0"
] | null | null | null |
ROAR/agent_module/jAM2Agent.py
|
mavabene/ROAR
|
51955293ad87128ec2c549142abf91d45314f380
|
[
"Apache-2.0"
] | null | null | null |
ROAR/agent_module/jAM2Agent.py
|
mavabene/ROAR
|
51955293ad87128ec2c549142abf91d45314f380
|
[
"Apache-2.0"
] | null | null | null |
from ROAR.agent_module.agent import Agent
from pathlib import Path
from ROAR.control_module.stanley_controller import Stanley_controller
from ROAR.control_module.pid_controller import PIDController
from ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \
SimpleWaypointFollowingLocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
import logging
class JAM2Agent(Agent):
def __init__(self, target_speed=120, **kwargs):
super().__init__(**kwargs)
self.target_speed = target_speed
self.logger = logging.getLogger("Stanley Agent")
self.route_file_path = Path(self.agent_settings.waypoint_file_path)
#self.pid_controller = PIDController(agent=self, steering_boundary=(-1, 1), throttle_boundary=(-1, 1))
self.stanley_controller = Stanley_controller(agent=self, steering_boundary=(-1, 1), throttle_boundary=(-1, 1))
self.mission_planner = WaypointFollowingMissionPlanner(agent=self)
# initiated right after mission plan
self.behavior_planner = BehaviorPlanner(agent=self)
self.local_planner = SimpleWaypointFollowingLocalPlanner(
agent=self,
controller=self.stanley_controller,
mission_planner=self.mission_planner,
behavior_planner=self.behavior_planner,
closeness_threshold=1)
self.logger.debug(
f"Waypoint Following Agent Initiated. Reading f"
f"rom {self.route_file_path.as_posix()}")
def run_step(self, vehicle: Vehicle,
sensors_data: SensorsData) -> VehicleControl:
super(JAM2Agent, self).run_step(vehicle=vehicle,
sensors_data=sensors_data)
self.transform_history.append(self.vehicle.transform)
if self.local_planner.is_done():
control = VehicleControl()
self.logger.debug("Path Following Agent is Done. Idling.")
else:
control = self.local_planner.run_in_series()
return control
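# Editor's note: a framework-free sketch of the delegation pattern in
# run_step above -- the agent defers to a local planner until it reports
# completion, then idles. The planner class here is an illustrative
# stand-in, not a ROAR class.
def _planner_loop_demo():
    class _FakePlanner:
        def __init__(self, steps=3):
            self._steps = steps
        def is_done(self):
            return self._steps == 0
        def run_in_series(self):
            self._steps -= 1
            return {"throttle": 0.5, "steering": 0.0}

    planner = _FakePlanner()
    controls = []
    while not planner.is_done():
        controls.append(planner.run_in_series())
    controls.append({"throttle": 0.0, "steering": 0.0})  # idle once done
    return controls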
| 48.625
| 118
| 0.731362
|
5695f786d070f617c3816571159f973c1d1bfbea
| 7,194
|
py
|
Python
|
src/table_validator/desktop/validation_drop_target.py
|
cthoyt/income2019
|
b336a31d2b15e5ec504fcb57a141602ceea0249e
|
[
"MIT"
] | null | null | null |
src/table_validator/desktop/validation_drop_target.py
|
cthoyt/income2019
|
b336a31d2b15e5ec504fcb57a141602ceea0249e
|
[
"MIT"
] | 1
|
2019-08-15T12:46:21.000Z
|
2019-08-15T13:56:49.000Z
|
src/table_validator/desktop/validation_drop_target.py
|
cthoyt/income2019
|
b336a31d2b15e5ec504fcb57a141602ceea0249e
|
[
"MIT"
] | 3
|
2019-08-07T10:09:19.000Z
|
2019-11-27T22:26:06.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Desktop GUI for ``table_validator``.
Author: Wolfgang Müller
The initial starting point was taken from zetcode
However, there are only few lines that survived changes.
-
ZetCode PyQt5 tutorial
This is a simple drag and
drop example.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
http://zetcode.com/gui/pyqt5/dragdrop/
"""
import logging
import urllib.request
from typing import Type
import click
from PyQt5.QtCore import QPropertyAnimation, QRect, Qt
from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout, QWidget
import table_validator
logger = logging.getLogger(__name__)
__all__ = [
'ValidationDropTarget',
'main',
]
class ValidationDropTarget(QWidget):
"""A Qt app that is a drop target and validates the file dropped."""
def __init__(self, app, validate, bottom, right):
self.label_url = QLabel()
self.label_success = QLabel()
self.label_instructions = QLabel()
# self.label_url = 0
super().__init__()
self.app = app
self.bottom = bottom
self.right = right
self.setAcceptDrops(True)
self.initUI()
self.validate = validate
# taken from
# https://www.iana.org/assignments/media-types/media-types.txt
self.accepted_formats = ['text/uri-list']
def _big_geometry(self):
w = 30
h = 30
x = self.right - w
y = self.bottom - h
big_w = 300
big_h = 300
big_x = self.right - big_w
big_y = self.bottom - big_h
if (self.x() < x) and (self.y() < y):
return
self.animation = QPropertyAnimation(self, b"geometry")
self.animation.setDuration(250)
self.animation.setStartValue(QRect(x, y, w, h))
self.animation.setEndValue(QRect(big_x, big_y, big_w, big_h))
self.animation.start()
self.setFixedSize(big_w, big_h)
def _small_geometry(self):
w = 30
h = 30
x = self.right - w
y = self.bottom - h
big_w = 300
big_h = 300
big_x = self.right - big_w
big_y = self.bottom - big_h
if (self.x() == x) and (self.y() == y):
return
self.animation = QPropertyAnimation(self, b"geometry")
self.animation.setDuration(250)
self.animation.setStartValue(QRect(big_x, big_y, big_w, big_h))
self.animation.setEndValue(QRect(x, y, w, h))
self.animation.start()
self.setFixedSize(big_w, big_h)
@staticmethod
def preprocess_response(data):
return table_validator.parse_tsv(data.split("\n"))
def dropEvent(self, e): # noqa: N802
"""Handle file drop events."""
logger.debug("Dropped!")
urls = e.mimeData().urls()
response = urllib.request.urlopen(urls[0].toString()) # noqa:S310
data = response.read().decode("UTF-8")
candidate = self.preprocess_response(data)
logger.debug("Candidate %s" % candidate)
self.label_url.setText("File examined: %s" % urls[0].toString())
if self.validate(candidate):
self.label_success.setText(
'<span style=" font-size:18pt; font-weight:600; color:#00aa00;">'
'Validation succeeded!'
'</span>'
)
else:
self.label_success.setText(
'<span style=" font-size:18pt; font-weight:600; color:#cc0000;">'
'Your data surely is great, but...'
'</span>'
)
logger.debug("dropped" % urls)
# self._small_geometry()
def is_accepted(self, e):
"""Check a file based on its MIME type."""
accept = any(
e.mimeData().hasFormat(i)
for i in self.accepted_formats
)
        if accept:
            e.accept()
        else:
            e.ignore()
        return accept  # report the decision so dragEnterEvent can log it
def enterEvent(self, e):
self._big_geometry()
def leaveEvent(self, e):
self._small_geometry()
def dragEnterEvent(self, e): # noqa: N802
"""Decide if you can drop a given type of file in the drop zone."""
self._big_geometry()
logger.debug("enter")
logger.debug(f'URLs: {e.mimeData().urls()}')
accept = self.is_accepted(e)
if accept:
logger.debug("Accepted")
else:
logger.debug("failed %s" % e.mimeData().formats())
# initUI
def initUI(self):
self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
self._small_geometry()
# https://stackoverflow.com/questions/18975734/how-can-i-find-the-screen-desktop-size-in-qt-so-i-can-display-a-desktop-notific
self.label_url.setAlignment(Qt.AlignLeft)
self.label_url.setWordWrap(True)
self.label_url.setText("Drop your files here:")
self.label_success.setAlignment(Qt.AlignLeft)
self.label_success.setText('<span style="color:#999999;">I did not yet analyze any file</span>')
self.label_instructions.setAlignment(Qt.AlignLeft)
self.label_instructions.setWordWrap(True)
self.label_instructions.setText("""
<p>
Are you asking yourself if your tabular data file is really matching
the template you agreed on with your collaboration partners?
<p>
Then this tool is the solution for you. Just take this file in your
file manager (finder, windows explorer, nautilus...) and then
<b> drop it</b> onto this window.
<p>
We will check the format compliance of your file and immediately
give
<ul>
<li> information if it is correct with respect to the template
                <li> information on where it is incorrect
</ul>
<p>
<b>Note:</b> Currently we process only <b>tab delimited</b> files.
</p>
""")
vbox = QVBoxLayout()
vbox.addWidget(self.label_url)
vbox.addWidget(self.label_success)
vbox.addWidget(self.label_instructions)
vbox.addStretch()
self.setLayout(vbox)
self.setWindowTitle('INCOME table Validation Drop Target')
# self.setGeometry(800, 500, 300, 400)
def run_with_validator(
validate,
cls: Type[ValidationDropTarget] = None,
) -> None:
if cls is None:
cls = ValidationDropTarget
app = QApplication([])
desktop = app.desktop()
geometry = desktop.availableGeometry()
bottom = geometry.bottom()
right = geometry.right()
drop_target = cls(app, validate, bottom, right)
drop_target.show()
app.exec_()
@click.command()
@click.option('-t', '--template', type=click.File(), default='template.tsv')
@click.option('-v', '--verbose', is_flag=True)
def main(template, verbose: bool):
"""Run the table_validator Desktop App."""
if verbose:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
click.echo(f'Building table validator with {template.name}')
validate = table_validator.TemplateValidator(template)
run_with_validator(validate)
if __name__ == '__main__':
main()
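# Editor's note: a hedged alternative entry point. run_with_validator()
# (defined above) accepts any callable; the lambda below stands in for
# table_validator.TemplateValidator. Calling demo() opens the Qt drop target
# and blocks until the window is closed, so it needs PyQt5 and a display.
def demo():
    run_with_validator(lambda candidate: bool(candidate))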
| 28.434783
| 134
| 0.61315
|
dd22bc1161228c77d538881a164dc29e5d10fecd
| 35,449
|
py
|
Python
|
python-package/brainflow/board_shim.py
|
kusumikakd/Brainflow
|
eb566a8cd49e27d47f56b93406d7ac17f3ef6547
|
[
"MIT"
] | null | null | null |
python-package/brainflow/board_shim.py
|
kusumikakd/Brainflow
|
eb566a8cd49e27d47f56b93406d7ac17f3ef6547
|
[
"MIT"
] | null | null | null |
python-package/brainflow/board_shim.py
|
kusumikakd/Brainflow
|
eb566a8cd49e27d47f56b93406d7ac17f3ef6547
|
[
"MIT"
] | null | null | null |
import ctypes
import numpy
from numpy.ctypeslib import ndpointer
import pkg_resources
import enum
import os
import platform
import sys
import struct
import json
from brainflow.exit_codes import BrainflowExitCodes
class BoardIds (enum.Enum):
"""Enum to store all supported Board Ids"""
STREAMING_BOARD = -2 #:
SYNTHETIC_BOARD = -1 #:
CYTON_BOARD = 0 #:
GANGLION_BOARD = 1 #:
CYTON_DAISY_BOARD = 2 #:
NOVAXR_BOARD = 3 #:
GANGLION_WIFI_BOARD = 4 #:
CYTON_WIFI_BOARD = 5 #:
CYTON_DAISY_WIFI_BOARD = 6 #:
BRAINBIT_BOARD = 7 #:
UNICORN_BOARD = 8 #:
CALLIBRI_EEG_BOARD = 9 #:
CALLIBRI_EMG_BOARD = 10 #:
CALLIBRI_ECG_BOARD = 11 #:
class LogLevels (enum.Enum):
"""Enum to store all log levels supported by BrainFlow"""
LEVEL_TRACE = 0 #:
LEVEL_DEBUG = 1 #:
LEVEL_INFO = 2 #:
LEVEL_WARN = 3 #:
LEVEL_ERROR = 4 #:
LEVEL_CRITICAL = 5 #:
LEVEL_OFF = 6 #:
class IpProtocolType (enum.Enum):
"""Enum to store Ip Protocol types"""
NONE = 0 #:
UDP = 1 #:
TCP = 2 #:
class BrainFlowInputParams (object):
""" inputs parameters for prepare_session method
:param serial_port: serial port name is used for boards which reads data from serial port
:type serial_port: str
:param mac_address: mac address for example its used for bluetooth based boards
:type mac_address: str
:param ip_address: ip address is used for boards which reads data from socket connection
:type ip_address: str
    :param ip_port: ip port for socket connection; for some boards where we know it in advance you don't need this parameter (NovaXR for example)
:type ip_port: int
:param ip_protocol: ip protocol type from IpProtocolType enum
:type ip_protocol: int
:param other_info: other info
:type other_info: str
"""
def __init__ (self):
self.serial_port = ''
self.mac_address = ''
self.ip_address = ''
self.ip_port = 0
self.ip_protocol = IpProtocolType.NONE.value
self.other_info = ''
self.timeout = 0
self.serial_number = ''
def to_json (self):
return json.dumps (self, default = lambda o: o.__dict__,
sort_keys = True, indent = 4)
class BrainFlowError (Exception):
"""This exception is raised if non-zero exit code is returned from C code
:param message: exception message
:type message: str
:param exit_code: exit code flow low level API
:type exit_code: int
"""
def __init__ (self, message, exit_code):
detailed_message = '%s:%d %s' % (BrainflowExitCodes (exit_code).name, exit_code, message)
super (BrainFlowError, self).__init__ (detailed_message)
self.exit_code = exit_code
class BoardControllerDLL (object):
__instance = None
@classmethod
def get_instance (cls):
if cls.__instance is None:
cls.__instance = cls ()
return cls.__instance
def __init__ (self):
if platform.system () == 'Windows':
if struct.calcsize ("P") * 8 == 64:
dll_path = 'lib\\BoardController.dll'
else:
dll_path = 'lib\\BoardController32.dll'
elif platform.system () == 'Darwin':
dll_path = 'lib/libBoardController.dylib'
else:
dll_path = 'lib/libBoardController.so'
full_path = pkg_resources.resource_filename (__name__, dll_path)
if os.path.isfile (full_path):
# for python we load dll by direct path but this dll may depend on other dlls and they will not be found!
# to solve it we can load all of them before loading the main one or change PATH\LD_LIBRARY_PATH env var.
# env variable looks better, since it can be done only once for all dependencies
dir_path = os.path.abspath (os.path.dirname (full_path))
if platform.system () == 'Windows':
os.environ['PATH'] = dir_path + os.pathsep + os.environ.get ('PATH', '')
else:
os.environ['LD_LIBRARY_PATH'] = dir_path + os.pathsep + os.environ.get ('LD_LIBRARY_PATH', '')
# for MacOS there are a few more env vars to search for libraries
if platform.system () == 'Darwin':
os.environ['DYLD_LIBRARY_PATH '] = dir_path + os.pathsep + os.environ.get ('DYLD_LIBRARY_PATH ', '')
os.environ['DYLD_FALLBACK_LIBRARY_PATH '] = dir_path + os.pathsep + os.environ.get ('DYLD_FALLBACK_LIBRARY_PATH ', '')
ctypes.cdll.LoadLibrary (pkg_resources.resource_filename (__name__, 'lib/libneurosdk-shared.dylib'))
self.lib = ctypes.cdll.LoadLibrary (full_path)
else:
            raise FileNotFoundError ('Dynamic library %s is missing, did you forget to compile brainflow before installing the python package?' % full_path)
self.prepare_session = self.lib.prepare_session
self.prepare_session.restype = ctypes.c_int
self.prepare_session.argtypes = [
ctypes.c_int,
ctypes.c_char_p
]
self.is_prepared = self.lib.is_prepared
self.is_prepared.restype = ctypes.c_int
self.is_prepared.argtypes = [
ndpointer (ctypes.c_int32),
ctypes.c_int,
ctypes.c_char_p
]
self.start_stream = self.lib.start_stream
self.start_stream.restype = ctypes.c_int
self.start_stream.argtypes = [
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p
]
self.stop_stream = self.lib.stop_stream
self.stop_stream.restype = ctypes.c_int
self.stop_stream.argtypes = [
ctypes.c_int,
ctypes.c_char_p
]
self.get_current_board_data = self.lib.get_current_board_data
self.get_current_board_data.restype = ctypes.c_int
self.get_current_board_data.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_double),
ndpointer (ctypes.c_int32),
ctypes.c_int,
ctypes.c_char_p
]
self.get_board_data = self.lib.get_board_data
self.get_board_data.restype = ctypes.c_int
self.get_board_data.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_double),
ctypes.c_int,
ctypes.c_char_p
]
self.release_session = self.lib.release_session
self.release_session.restype = ctypes.c_int
self.release_session.argtypes = [
ctypes.c_int,
ctypes.c_char_p
]
self.get_board_data_count = self.lib.get_board_data_count
self.get_board_data_count.restype = ctypes.c_int
self.get_board_data_count.argtypes = [
ndpointer (ctypes.c_int32),
ctypes.c_int,
ctypes.c_char_p
]
self.set_log_level = self.lib.set_log_level
self.set_log_level.restype = ctypes.c_int
self.set_log_level.argtypes = [
ctypes.c_int
]
self.set_log_file = self.lib.set_log_file
self.set_log_file.restype = ctypes.c_int
self.set_log_file.argtypes = [
ctypes.c_char_p
]
self.log_message = self.lib.log_message
self.log_message.restype = ctypes.c_int
self.log_message.argtypes = [
ctypes.c_int,
ctypes.c_char_p
]
self.config_board = self.lib.config_board
self.config_board.restype = ctypes.c_int
self.config_board.argtypes = [
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p
]
self.get_sampling_rate = self.lib.get_sampling_rate
self.get_sampling_rate.restype = ctypes.c_int
self.get_sampling_rate.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32)
]
self.get_battery_channel = self.lib.get_battery_channel
self.get_battery_channel.restype = ctypes.c_int
self.get_battery_channel.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32)
]
self.get_package_num_channel = self.lib.get_package_num_channel
self.get_package_num_channel.restype = ctypes.c_int
self.get_package_num_channel.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32)
]
self.get_timestamp_channel = self.lib.get_timestamp_channel
self.get_timestamp_channel.restype = ctypes.c_int
self.get_timestamp_channel.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32)
]
self.get_num_rows = self.lib.get_num_rows
self.get_num_rows.restype = ctypes.c_int
self.get_num_rows.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32)
]
self.get_eeg_names = self.lib.get_eeg_names
self.get_eeg_names.restype = ctypes.c_int
self.get_eeg_names.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_ubyte),
ndpointer (ctypes.c_int32)
]
self.get_eeg_channels = self.lib.get_eeg_channels
self.get_eeg_channels.restype = ctypes.c_int
self.get_eeg_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_emg_channels = self.lib.get_emg_channels
self.get_emg_channels.restype = ctypes.c_int
self.get_emg_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_ecg_channels = self.lib.get_ecg_channels
self.get_ecg_channels.restype = ctypes.c_int
self.get_ecg_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_eog_channels = self.lib.get_eog_channels
self.get_eog_channels.restype = ctypes.c_int
self.get_eog_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_ppg_channels = self.lib.get_ppg_channels
self.get_ppg_channels.restype = ctypes.c_int
self.get_ppg_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_eda_channels = self.lib.get_eda_channels
self.get_eda_channels.restype = ctypes.c_int
self.get_eda_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_accel_channels = self.lib.get_accel_channels
self.get_accel_channels.restype = ctypes.c_int
self.get_accel_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_analog_channels = self.lib.get_analog_channels
self.get_analog_channels.restype = ctypes.c_int
self.get_analog_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_gyro_channels = self.lib.get_gyro_channels
self.get_gyro_channels.restype = ctypes.c_int
self.get_gyro_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_other_channels = self.lib.get_other_channels
self.get_other_channels.restype = ctypes.c_int
self.get_other_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_temperature_channels = self.lib.get_temperature_channels
self.get_temperature_channels.restype = ctypes.c_int
self.get_temperature_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
self.get_resistance_channels = self.lib.get_resistance_channels
self.get_resistance_channels.restype = ctypes.c_int
self.get_resistance_channels.argtypes = [
ctypes.c_int,
ndpointer (ctypes.c_int32),
ndpointer (ctypes.c_int32)
]
class BoardShim (object):
"""BoardShim class is a primary interface to all boards
:param board_id: Id of your board
:type board_id: int
:param input_params: board specific structure to pass required arguments
:type input_params: BrainFlowInputParams
"""
def __init__ (self, board_id, input_params):
try:
self.input_json = input_params.to_json ().encode ()
except:
self.input_json = input_params.to_json ()
else:
self.port_name = None
self.board_id = board_id
# we need it for streaming board
if board_id == BoardIds.STREAMING_BOARD.value:
try:
self._master_board_id = int (input_params.other_info)
except:
raise BrainFlowError ('set master board id using params.other_info for STREAMING_BOARD',
BrainflowExitCodes.INVALID_ARGUMENTS_ERROR.value)
else:
self._master_board_id = self.board_id
@classmethod
def set_log_level (cls, log_level):
"""set BrainFlow log level, use it only if you want to write your own messages to BrainFlow logger,
otherwise use enable_board_logger, enable_dev_board_logger or disable_board_logger
:param log_level: log level, to specify it you should use values from LogLevels enum
:type log_level: int
"""
res = BoardControllerDLL.get_instance ().set_log_level (log_level)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to enable logger', res)
@classmethod
def enable_board_logger (cls):
"""enable BrainFlow Logger with level INFO, uses stderr for log messages by default"""
cls.set_log_level (LogLevels.LEVEL_INFO.value)
@classmethod
def disable_board_logger (cls):
"""disable BrainFlow Logger"""
cls.set_log_level (LogLevels.LEVEL_OFF.value)
@classmethod
def enable_dev_board_logger (cls):
"""enable BrainFlow Logger with level TRACE, uses stderr for log messages by default"""
cls.set_log_level (LogLevels.LEVEL_TRACE.value)
@classmethod
def log_message (cls, log_level, message):
"""write your own log message to BrainFlow logger, use it if you wanna have single logger for your own code and BrainFlow's code
:param log_level: log level
:type log_file: int
:param message: message
:type message: str
"""
try:
msg = message.encode ()
except:
msg = message
res = BoardControllerDLL.get_instance ().log_message (log_level, msg)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to write log message', res)
@classmethod
def set_log_file (cls, log_file):
"""redirect logger from stderr to file, can be called any time
:param log_file: log file name
:type log_file: str
"""
try:
file = log_file.encode ()
except:
file = log_file
res = BoardControllerDLL.get_instance ().set_log_file (file)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to redirect logs to a file', res)
@classmethod
def get_sampling_rate (cls, board_id):
"""get sampling rate for a board
:param board_id: Board Id
:type board_id: int
:return: sampling rate for this board id
:rtype: int
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
sampling_rate = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_sampling_rate (board_id, sampling_rate)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return int (sampling_rate[0])
@classmethod
def get_package_num_channel (cls, board_id):
"""get package num channel for a board
:param board_id: Board Id
:type board_id: int
:return: number of package num channel
:rtype: int
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
package_num_channel = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_package_num_channel (board_id, package_num_channel)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return int (package_num_channel[0])
@classmethod
def get_battery_channel (cls, board_id):
"""get battery channel for a board
:param board_id: Board Id
:type board_id: int
        :return: number of battery channel
:rtype: int
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
battery_channel = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_battery_channel (board_id, battery_channel)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return int (battery_channel[0])
@classmethod
def get_num_rows (cls, board_id):
"""get number of rows in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: number of rows in returned numpy array
:rtype: int
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_rows = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_num_rows (board_id, num_rows)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return int (num_rows[0])
@classmethod
def get_timestamp_channel (cls, board_id):
"""get timestamp channel in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: number of timestamp channel in returned numpy array
:rtype: int
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
timestamp_channel = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_timestamp_channel (board_id, timestamp_channel)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return int (timestamp_channel[0])
@classmethod
def get_eeg_names (cls, board_id):
"""get names of EEG channels in 10-20 system if their location is fixed
:param board_id: Board Id
:type board_id: int
:return: EEG channels names
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
string = numpy.zeros (4096).astype (numpy.ubyte)
string_len = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_eeg_names (board_id, string, string_len)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
return string.tobytes ().decode ('utf-8')[0:string_len[0]].split (',')
@classmethod
def get_eeg_channels (cls, board_id):
"""get list of eeg channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of eeg channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
eeg_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_eeg_channels (board_id, eeg_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = eeg_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_emg_channels (cls, board_id):
"""get list of emg channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
        :return: list of emg channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
emg_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_emg_channels (board_id, emg_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = emg_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_ecg_channels (cls, board_id):
"""get list of ecg channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of ecg channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
ecg_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_ecg_channels (board_id, ecg_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = ecg_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_eog_channels (cls, board_id):
"""get list of eog channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of eog channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
eog_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_eog_channels (board_id, eog_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = eog_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_eda_channels (cls, board_id):
"""get list of eda channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of eda channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
eda_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_eda_channels (board_id, eda_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = eda_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_ppg_channels (cls, board_id):
"""get list of ppg channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of ppg channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
ppg_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_ppg_channels (board_id, ppg_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = ppg_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_accel_channels (cls, board_id):
"""get list of accel channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of accel channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
accel_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_accel_channels (board_id, accel_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = accel_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_analog_channels (cls, board_id):
"""get list of analog channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of analog channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
analog_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_analog_channels (board_id, analog_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = analog_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_gyro_channels (cls, board_id):
"""get list of gyro channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of gyro channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
gyro_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_gyro_channels (board_id, gyro_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = gyro_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_other_channels (cls, board_id):
"""get list of other channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of other channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
other_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_other_channels (board_id, other_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = other_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_temperature_channels (cls, board_id):
"""get list of temperature channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of temperature channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
temperature_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_temperature_channels (board_id, temperature_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = temperature_channels.tolist () [0:num_channels[0]]
return result
@classmethod
def get_resistance_channels (cls, board_id):
"""get list of resistance channels in resulting data table for a board
:param board_id: Board Id
:type board_id: int
:return: list of resistance channels in returned numpy array
:rtype: list
:raises BrainFlowError: If this board has no such data exit code is UNSUPPORTED_BOARD_ERROR
"""
num_channels = numpy.zeros (1).astype (numpy.int32)
resistance_channels = numpy.zeros (512).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_resistance_channels (board_id, resistance_channels, num_channels)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to request info about this board', res)
result = resistance_channels.tolist () [0:num_channels[0]]
return result
def prepare_session (self):
"""prepare streaming sesssion, init resources, you need to call it before any other BoardShim object methods"""
res = BoardControllerDLL.get_instance ().prepare_session (self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to prepare streaming session', res)
def start_stream (self, num_samples = 1800 * 250, streamer_params = None):
"""Start streaming data, this methods stores data in ringbuffer
:param num_samples: size of ring buffer to keep data
:type num_samples: int
        :param streamer_params: parameter to stream data from brainflow, supported values: "file://%file_name%:w", "file://%file_name%:a", "streaming_board://%multicast_group_ip%:%port%". Range for multicast addresses is from "224.0.0.0" to "239.255.255.255"
:type streamer_params: str
"""
if streamer_params is None:
streamer = None
else:
try:
streamer = streamer_params.encode ()
except:
streamer = streamer_params
res = BoardControllerDLL.get_instance ().start_stream (num_samples, streamer, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to start streaming session', res)
def stop_stream (self):
"""Stop streaming data"""
res = BoardControllerDLL.get_instance ().stop_stream (self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to stop streaming session', res)
def release_session (self):
"""release all resources"""
res = BoardControllerDLL.get_instance ().release_session (self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to release streaming session', res)
def get_current_board_data (self, num_samples):
"""Get specified amount of data or less if there is not enough data, doesnt remove data from ringbuffer
:param num_samples: max number of samples
:type num_samples: int
:return: latest data from a board
:rtype: numpy 2d array
"""
package_length = BoardShim.get_num_rows (self._master_board_id)
data_arr = numpy.zeros (int(num_samples * package_length)).astype (numpy.float64)
current_size = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_current_board_data (num_samples, data_arr, current_size, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to get current data', res)
if len (current_size) == 0:
return None
data_arr = data_arr[0:current_size[0] * package_length].reshape (package_length, current_size[0])
return data_arr
def get_board_data_count (self):
"""Get num of elements in ringbuffer
:return: number of elements in ring buffer
:rtype: int
"""
data_size = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().get_board_data_count (data_size, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to obtain buffer size', res)
return data_size[0]
def is_prepared (self):
"""Check if session is ready or not
:return: session status
:rtype: bool
"""
prepared = numpy.zeros (1).astype (numpy.int32)
res = BoardControllerDLL.get_instance ().is_prepared (prepared, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to check session status', res)
return bool(prepared[0])
def get_board_data (self):
"""Get all board data and remove them from ringbuffer
:return: all data from a board
:rtype: numpy 2d array
"""
data_size = self.get_board_data_count ()
package_length = BoardShim.get_num_rows (self._master_board_id)
data_arr = numpy.zeros (data_size * package_length).astype (numpy.float64)
res = BoardControllerDLL.get_instance ().get_board_data (data_size, data_arr, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to get board data', res)
return data_arr.reshape (package_length, data_size)
def config_board (self, config):
"""Use this method carefully and only if you understand what you are doing, do NOT use it to start or stop streaming
:param config: string to send to a board
:type config: str
"""
try:
config_string = config.encode ()
except:
config_string = config
res = BoardControllerDLL.get_instance ().config_board (config_string, self.board_id, self.input_json)
if res != BrainflowExitCodes.STATUS_OK.value:
raise BrainFlowError ('unable to config board', res)
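# Editor's note: hedged end-to-end sketch using only methods defined above.
# SYNTHETIC_BOARD needs no hardware; the buffer size and sleep duration are
# arbitrary choices for the demo.
def _synthetic_board_demo():
    import time
    params = BrainFlowInputParams ()
    board = BoardShim (BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session ()
    board.start_stream (num_samples = 45000)
    time.sleep (2)
    data = board.get_board_data ()   # 2d numpy array, rows are channels
    board.stop_stream ()
    board.release_session ()
    return data.shape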
| 39.387778
| 255
| 0.649835
|
33390971d92a152b2c5c47aaec7bacfc91e22499
| 630
|
py
|
Python
|
ifind/common/encoding.py
|
padre-lab-eu/extended_simiir
|
aa9d54784dcfb4c629687317622eae8ba6d59a79
|
[
"MIT"
] | 2
|
2018-03-20T01:14:50.000Z
|
2021-05-15T12:05:32.000Z
|
ifind/common/encoding.py
|
padre-lab-eu/extended_simiir
|
aa9d54784dcfb4c629687317622eae8ba6d59a79
|
[
"MIT"
] | null | null | null |
ifind/common/encoding.py
|
padre-lab-eu/extended_simiir
|
aa9d54784dcfb4c629687317622eae8ba6d59a79
|
[
"MIT"
] | 3
|
2015-02-04T13:36:26.000Z
|
2015-03-17T08:22:27.000Z
|
import string
def encode_symbols(instring):
"""
Encodes symbols for HTTP GET.
Args:
        instring (str): String to be included in a GET request.
Returns:
str: String with some characters replaced with URL encodings.
Usage:
Private method.
"""
    # str methods work on both Python 2 and 3; string.replace() was removed in Python 3.
    encoded_string = instring.replace("'", '%27')
    encoded_string = encoded_string.replace('"', '%22')  # '%22' encodes '"'; the original mapped it to '%27', which looks like a typo
    encoded_string = encoded_string.replace('+', '%2b')
    encoded_string = encoded_string.replace(' ', '%20')
    encoded_string = encoded_string.replace(':', '%3a')
return encoded_string
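# Editor's note: usage sketch plus the standard-library alternative;
# urllib.parse.quote is the usual way to percent-encode in Python 3 and
# covers far more characters than the manual replacements above.
def _encoding_demo():
    from urllib.parse import quote
    manual = encode_symbols("say 'hi' + bye")
    stdlib = quote("say 'hi' + bye", safe='')
    return manual, stdlib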
| 26.25
| 69
| 0.650794
|
096235ddeed3f14cfe50f8aa61cf36616fe92a1b
| 5,217
|
py
|
Python
|
python/lsst/eotest/sensor/pca_bias_profile_plots.py
|
tguillemLSST/eotest
|
c6f150984fa5dff85b9805028645bf46fc846f11
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2016-04-21T07:05:45.000Z
|
2020-08-05T08:37:37.000Z
|
python/lsst/eotest/sensor/pca_bias_profile_plots.py
|
tguillemLSST/eotest
|
c6f150984fa5dff85b9805028645bf46fc846f11
|
[
"BSD-3-Clause-LBNL"
] | 70
|
2015-03-26T09:48:53.000Z
|
2020-04-22T16:29:43.000Z
|
python/lsst/eotest/sensor/pca_bias_profile_plots.py
|
tguillemLSST/eotest
|
c6f150984fa5dff85b9805028645bf46fc846f11
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2017-08-15T20:52:44.000Z
|
2022-03-25T12:54:07.000Z
|
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from .ccd_bias_pca import CCD_bias_PCA
from .overscan_frame import make_overscan_frame
from .AmplifierGeometry import makeAmplifierGeometry
__all__ = ['pca_bias_profile_plots']
def plot_imarr(imarr, vmin=-10, vmax=10):
plt.imshow((imarr - imarr.mean()).T, vmin=vmin, vmax=vmax, origin='lower')
plt.colorbar()
def pca_bias_profile_plots(raw_file, amp, pca_bias_files, suffix='',
vmin=-10, vmax=10, amps=None):
if amps is None:
amps = (amp,)
amp_geom = makeAmplifierGeometry(raw_file)
ccd_pcas = CCD_bias_PCA.read_model(*pca_bias_files)
if not hasattr(ccd_pcas, 'nx'):
ccd_pcas.nx = 10
ccd_pcas.ny = 10
with fits.open(raw_file) as hdus:
raft = hdus[0].header['RAFTBAY']
sensor = hdus[0].header['CCDSLOT']
Run = hdus[0].header['RUNNUM']
seqnum = hdus[0].header['SEQNUM']
datasec = hdus[1].header['DATASEC']
prescan = int(datasec.strip('[]').split(':')[0]) - 1
bias_file = f'bias_model_{raft}_{sensor}_{Run}_{seqnum:06d}_median.fits'
residuals_file = f'residuals_{raft}_{sensor}_{Run}_{seqnum:06d}_median.fits'
ccd_pcas.make_bias_frame(raw_file, bias_file,
residuals_file=residuals_file, amps=amps)
overscan_file = f'overscan_model_{raft}_{sensor}_{Run}_{seqnum:06d}.fits'
make_overscan_frame(raw_file, outfile=overscan_file, amps=amps)
with fits.open(raw_file) as raw, fits.open(bias_file) as bias,\
fits.open(overscan_file) as oscan:
nrows = 6
row_height = 3
alpha = 0.5
print(amp)
title = f'Run {Run}, {raft}_{sensor}, SEQNUM {seqnum}, amp {amp}'
fig = plt.figure(figsize=(10, nrows*row_height))
fig.add_subplot(nrows, 1, 1)
plot_imarr(raw[amp].data, vmin=vmin, vmax=vmax)
plt.title('raw data')
fig.add_subplot(nrows, 1, 2)
plot_imarr(bias[amp].data, vmin=vmin, vmax=vmax)
plt.title('PCA bias model')
fig.add_subplot(nrows, 1, 3)
plot_imarr(raw[amp].data - bias[amp].data, vmin=vmin, vmax=vmax)
plt.title('raw - bias')
fig.add_subplot(nrows, 1, 4)
plot_imarr(oscan[amp].data, vmin=vmin, vmax=vmax)
plt.title('overscan-based image')
fig.add_subplot(nrows, 1, 5)
plot_imarr(raw[amp].data - oscan[amp].data, vmin=vmin, vmax=vmax)
plt.title('raw - overscan-based image')
plt.suptitle('\n'.join((title, os.path.basename(raw_file))))
# Plot raw, bias model, and overscan profiles in the serial
# direction using the pixels in the parallel overscan region.
rows = slice(amp_geom.parallel_overscan.getMinY(),
amp_geom.parallel_overscan.getMaxY())
fig.add_subplot(nrows, 2, nrows*2-1)
oscan_profile = np.mean(oscan[amp].data[rows, :], axis=0)[prescan:]
plt.plot(oscan_profile, label='overscan region', alpha=alpha)
plt.plot(np.mean(raw[amp].data, axis=0)[prescan:],
label='raw data (full segment)', alpha=alpha)
bias_profile = np.mean(bias[amp].data[rows, :], axis=0)[prescan:]
plt.plot(bias_profile, label='bias model (overscan region)',
alpha=alpha)
ymin, ymax = np.min(bias_profile), np.max(bias_profile)
dy = (ymax - ymin)/5.
plt.ylim(ymin - dy, ymax + dy)
plt.legend(fontsize='x-small')
plt.title('serial direction profiles')
# Plot raw, bias model, and overscan profiles in the parallel
# direction using the pixels in the serial overscan region.
columns = slice(amp_geom.serial_overscan.getMinX(),
amp_geom.serial_overscan.getMaxX())
fig.add_subplot(nrows, 2, nrows*2)
oscan_profile = np.mean(oscan[amp].data[:, columns], axis=1)
plt.plot(oscan_profile, label='overscan region',
alpha=alpha)
plt.plot(np.mean(raw[amp].data, axis=1),
label='raw data (full segment)', alpha=alpha)
plt.plot(np.mean(bias[amp].data[:, columns], axis=1),
label='bias model (overscan region)', alpha=alpha)
ymin, ymax = np.min(oscan_profile), np.max(oscan_profile)
dy = (ymax - ymin)/5.
plt.ylim(ymin - dy, ymax + dy)
plt.title('parallel direction profiles')
plt.savefig(f'{Run}_{raft}_{sensor}_{seqnum}_{amp}_{suffix}_'
'bias_model_profiles.png')
fig1 = plt.figure(figsize=(10, 8))
plt.hist((raw[amp].data - bias[amp].data)[:, prescan:].ravel(),
bins=100, range=(-50, 50), histtype='step',
label='raw - bias model')
plt.hist((raw[amp].data - oscan[amp].data)[:, prescan:].ravel(),
bins=100, range=(-50, 50), histtype='step',
label='raw - overscan-based image')
plt.title(title)
plt.legend(fontsize='x-small')
plt.xlabel('residuals (ADU/pixel)')
plt.yscale('log')
plt.savefig(f'{Run}_{raft}_{sensor}_{seqnum}_{amp}_{suffix}_'
'bias_model_residuals_hist.png')
| 44.211864
| 80
| 0.612229
|
58ae87ec5b698f77dadfbc3abdb4413837d5561c
| 2,058
|
py
|
Python
|
Proyect Code/4.-All Sklearn Classification Algorithms.py
|
DataEngel/Heart-Disease-UCI---Classification
|
8601b1c94aad9de9e3af60dc11ba75325fe95b9d
|
[
"Apache-2.0"
] | null | null | null |
Proyect Code/4.-All Sklearn Classification Algorithms.py
|
DataEngel/Heart-Disease-UCI---Classification
|
8601b1c94aad9de9e3af60dc11ba75325fe95b9d
|
[
"Apache-2.0"
] | null | null | null |
Proyect Code/4.-All Sklearn Classification Algorithms.py
|
DataEngel/Heart-Disease-UCI---Classification
|
8601b1c94aad9de9e3af60dc11ba75325fe95b9d
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
dt_heart = pd.read_csv('data/heart.csv')
#print(dt_heart['target'].describe())
x = dt_heart.drop(['target'], axis=1)
y = dt_heart['target']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.35, random_state=1)
knn_class = KNeighborsClassifier().fit(x_train, y_train)
knn_prediction = knn_class.predict(x_test)
print('='*64)
print('SCORE con KNN: ', accuracy_score(knn_prediction, y_test))
'''bag_class = BaggingClassifier(base_estimator=KNeighborsClassifier(), n_estimators=50).fit(x_train, y_train) # base_estimator takes the estimator our method will be built on || n_estimators sets how many of these models we will use
bag_pred = bag_class.predict(x_test)
print('='*64)
print(accuracy_score(bag_pred, y_test))'''
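# Base estimators to wrap in a BaggingClassifier and compare below.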
estimators = {
'LogisticRegression' : LogisticRegression(),
'SVC' : SVC(),
'LinearSVC' : LinearSVC(),
'SGD' : SGDClassifier(loss="hinge", penalty="l2", max_iter=5),
'KNN' : KNeighborsClassifier(),
'DecisionTreeClf' : DecisionTreeClassifier(),
'RandomTreeForest' : RandomForestClassifier(random_state=0)
}
for name, estimator in estimators.items():
bag_class = BaggingClassifier(base_estimator=estimator, n_estimators=50).fit(x_train, y_train)
bag_predict = bag_class.predict(x_test)
print('='*64)
print('SCORE Bagging with {} : {}'.format(name, accuracy_score(bag_predict, y_test)))
| 39.576923
| 259
| 0.729835
|
29a04727ba3657d7eba62b0f3c8d273e0c4e82aa
| 2,361
|
py
|
Python
|
phaseshifts/gui/ImportDialog.py
|
Liam-Deacon/phaseshifts
|
23307b42e2d7b1fe772f78257635dc0872ca7ea4
|
[
"MIT"
] | null | null | null |
phaseshifts/gui/ImportDialog.py
|
Liam-Deacon/phaseshifts
|
23307b42e2d7b1fe772f78257635dc0872ca7ea4
|
[
"MIT"
] | 1
|
2019-09-21T15:12:58.000Z
|
2019-09-21T15:12:58.000Z
|
phaseshifts/gui/ImportDialog.py
|
Lightslayer/phaseshifts
|
23307b42e2d7b1fe772f78257635dc0872ca7ea4
|
[
"MIT"
] | null | null | null |
'''
Created on 10 Feb 2014
@author: Liam Deacon
@contact: liam.deacon@diamond.ac.uk
@copyright: Copyright 2014 Liam Deacon
@license: MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to
do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from PyQt4 import QtGui, uic
import res_rc
class ImportDialog(QtGui.QDialog):
'''
Dialog class for updating sequences
'''
def __init__(self, parent=None, model=None):
super(ImportDialog, self).__init__(parent)
# set dictionary
self.action = None
# dynamically load ui
self.ui = uic.loadUi("gui/ImportDialog.ui", self)
self.initUi()
if isinstance(model, str):
model = model.lower()
if model == 'slab':
self.ui.radioSlab.setChecked(True)
self.ui.show()
def initUi(self):
# Setup slots and signals
self.ui.buttonBox.clicked[
QtGui.QAbstractButton].connect(self.buttonPress)
def buttonPress(self, button):
'''Deal with user interaction of button group'''
action = str(button.text()).lower()
if action == 'cancel':
# do not apply settings & close dialog
self.action = action
self.ui.close()
elif action == 'ok':
self.action = action
self.ui.close()
| 33.253521
| 80
| 0.663278
|
6ed272c09833eed8d7bee715b191c5e49e913f8f
| 382
|
py
|
Python
|
build/lib/minotaur-manticore-maze/progress.py
|
smidem/minotaur-manticore-maze
|
0c08c83857b19be6cc6cae4b1f2acf5d485858a6
|
[
"MIT"
] | null | null | null |
build/lib/minotaur-manticore-maze/progress.py
|
smidem/minotaur-manticore-maze
|
0c08c83857b19be6cc6cae4b1f2acf5d485858a6
|
[
"MIT"
] | null | null | null |
build/lib/minotaur-manticore-maze/progress.py
|
smidem/minotaur-manticore-maze
|
0c08c83857b19be6cc6cae4b1f2acf5d485858a6
|
[
"MIT"
] | null | null | null |
from tqdm import tqdm
import time
class Progress():
def bar(self, secs, prefix):
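"""Render a 100-step tqdm progress bar, sleeping `secs` seconds per step (roughly 100 * secs seconds in total)."""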
self.secs = secs
self.prefix = prefix
with tqdm(total=100,
ncols=75,
bar_format='{l_bar}{bar}|',
desc=prefix) as pbar:
for i in range(100):
pbar.update(1)
time.sleep(secs)
| 22.470588
| 45
| 0.481675
|
666ada53d2334ae4e4296495f6c400d73483ea82
| 9,075
|
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/models/v1_search.py
|
gregmbi/polyaxon
|
8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_search.py
|
gregmbi/polyaxon
|
8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_search.py
|
gregmbi/polyaxon
|
8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Search(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"uuid": "str",
"name": "str",
"description": "str",
"tags": "list[str]",
"disabled": "bool",
"deleted": "bool",
"spec": "V1SearchSpec",
"created_at": "datetime",
"updated_at": "datetime",
}
attribute_map = {
"uuid": "uuid",
"name": "name",
"description": "description",
"tags": "tags",
"disabled": "disabled",
"deleted": "deleted",
"spec": "spec",
"created_at": "created_at",
"updated_at": "updated_at",
}
def __init__(
self,
uuid=None,
name=None,
description=None,
tags=None,
disabled=None,
deleted=None,
spec=None,
created_at=None,
updated_at=None,
local_vars_configuration=None,
): # noqa: E501
"""V1Search - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._name = None
self._description = None
self._tags = None
self._disabled = None
self._deleted = None
self._spec = None
self._created_at = None
self._updated_at = None
self.discriminator = None
if uuid is not None:
self.uuid = uuid
if name is not None:
self.name = name
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if disabled is not None:
self.disabled = disabled
if deleted is not None:
self.deleted = deleted
if spec is not None:
self.spec = spec
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
@property
def uuid(self):
"""Gets the uuid of this V1Search. # noqa: E501
:return: The uuid of this V1Search. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1Search.
:param uuid: The uuid of this V1Search. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this V1Search. # noqa: E501
:return: The name of this V1Search. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Search.
:param name: The name of this V1Search. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this V1Search. # noqa: E501
:return: The description of this V1Search. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1Search.
:param description: The description of this V1Search. # noqa: E501
:type: str
"""
self._description = description
@property
def tags(self):
"""Gets the tags of this V1Search. # noqa: E501
:return: The tags of this V1Search. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this V1Search.
:param tags: The tags of this V1Search. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def disabled(self):
"""Gets the disabled of this V1Search. # noqa: E501
:return: The disabled of this V1Search. # noqa: E501
:rtype: bool
"""
return self._disabled
@disabled.setter
def disabled(self, disabled):
"""Sets the disabled of this V1Search.
:param disabled: The disabled of this V1Search. # noqa: E501
:type: bool
"""
self._disabled = disabled
@property
def deleted(self):
"""Gets the deleted of this V1Search. # noqa: E501
:return: The deleted of this V1Search. # noqa: E501
:rtype: bool
"""
return self._deleted
@deleted.setter
def deleted(self, deleted):
"""Sets the deleted of this V1Search.
:param deleted: The deleted of this V1Search. # noqa: E501
:type: bool
"""
self._deleted = deleted
@property
def spec(self):
"""Gets the spec of this V1Search. # noqa: E501
:return: The spec of this V1Search. # noqa: E501
:rtype: V1SearchSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Search.
:param spec: The spec of this V1Search. # noqa: E501
:type: V1SearchSpec
"""
self._spec = spec
@property
def created_at(self):
"""Gets the created_at of this V1Search. # noqa: E501
:return: The created_at of this V1Search. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this V1Search.
:param created_at: The created_at of this V1Search. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this V1Search. # noqa: E501
:return: The updated_at of this V1Search. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this V1Search.
:param updated_at: The updated_at of this V1Search. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Search):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Search):
return True
return self.to_dict() != other.to_dict()
| 25.208333
| 85
| 0.562975
|
8e886a4bbe7582d0c4cc635e0b153178757a725d
| 1,022
|
py
|
Python
|
lib/_Install_Package.py
|
jeffrimko/Verace
|
2823e88367dd45f86ae770839d4f114577495639
|
[
"MIT"
] | null | null | null |
lib/_Install_Package.py
|
jeffrimko/Verace
|
2823e88367dd45f86ae770839d4f114577495639
|
[
"MIT"
] | null | null | null |
lib/_Install_Package.py
|
jeffrimko/Verace
|
2823e88367dd45f86ae770839d4f114577495639
|
[
"MIT"
] | null | null | null |
##==============================================================#
## SECTION: Imports #
##==============================================================#
import os
import subprocess
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def generate_readme():
subprocess.call("asciidoctor -b docbook ../README.adoc", shell=True)
subprocess.call("pandoc -r docbook -w rst -o README.rst ../README.xml", shell=True)
os.remove("../README.xml")
def cleanup_readme():
os.remove("README.rst")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
generate_readme()
subprocess.call("python setup.py install", shell=True)
cleanup_readme()
| 36.5
| 87
| 0.342466
|
118062a5cf09c827e53317f2f178d4b31d081754
| 10,598
|
py
|
Python
|
hdp-ambari-mpack-3.1.4.0/stacks/HDP/3.0/services/YARN/package/alerts/alert_ats_hbase.py
|
dropoftruth/dfhz_hdp_mpack
|
716f0396dce25803365c1aed9904b74fbe396f79
|
[
"Apache-2.0"
] | 3
|
2022-01-05T10:10:36.000Z
|
2022-02-21T06:57:06.000Z
|
hdp-ambari-mpack-3.1.4.0/stacks/HDP/3.0/services/YARN/package/alerts/alert_ats_hbase.py
|
dropoftruth/dfhz_hdp_mpack
|
716f0396dce25803365c1aed9904b74fbe396f79
|
[
"Apache-2.0"
] | 13
|
2019-06-05T07:47:00.000Z
|
2019-12-29T08:29:27.000Z
|
hdp-ambari-mpack-3.1.4.0/stacks/HDP/3.0/services/YARN/package/alerts/alert_ats_hbase.py
|
dropoftruth/dfhz_hdp_mpack
|
716f0396dce25803365c1aed9904b74fbe396f79
|
[
"Apache-2.0"
] | 2
|
2022-01-05T09:09:20.000Z
|
2022-02-21T07:02:06.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import json
import subprocess
import time
import traceback
import socket  # needed for the socket.getfqdn() fallback used below when host_name is not supplied
from resource_management.core import global_lock
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.resources import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.exceptions import ComponentIsNotRunning
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
CRITICAL_RESULT_CODE = 'CRITICAL'
OK_RESULT_CODE = 'OK'
UKNOWN_STATUS_CODE = 'UNKNOWN'
OK_MESSAGE = "The HBase application reported a '{0}' state in {1:.3f}s"
MESSAGE_WITH_STATE_AND_INSTANCES = "The application reported a '{0}' state in {1:.3f}s. [Live: {2}, Desired: {3}]"
CRITICAL_MESSAGE_WITH_STATE = "The HBase application reported a '{0}' state. Check took {1:.3f}s"
CRITICAL_MESSAGE = "ats-hbase service information could not be retrieved"
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
STACK_ROOT_DEFAULT = Script.get_stack_root()
ATS_HBASE_PRINCIPAL_KEY = '{{yarn-hbase-site/hbase.master.kerberos.principal}}'
ATS_HBASE_PRINCIPAL_KEYTAB_KEY = '{{yarn-hbase-site/hbase.master.keytab.file}}'
ATS_HBASE_USER_KEY = '{{yarn-env/yarn_ats_user}}'
ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY = '{{yarn-hbase-env/is_hbase_system_service_launch}}'
USE_EXTERNAL_HBASE_KEY = '{{yarn-hbase-env/use_external_hbase}}'
ATS_HBASE_PID_DIR_PREFIX = '{{yarn-hbase-env/yarn_hbase_pid_dir_prefix}}'
ATS_HBASE_APP_NOT_FOUND_KEY = format("Service ats-hbase not found")
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
CHECK_COMMAND_TIMEOUT_DEFAULT = 120.0
logger = logging.getLogger('ambari_alerts')
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, ATS_HBASE_PRINCIPAL_KEY, ATS_HBASE_PRINCIPAL_KEYTAB_KEY,
ATS_HBASE_USER_KEY, STACK_ROOT, USE_EXTERNAL_HBASE_KEY, ATS_HBASE_PID_DIR_PREFIX, ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY)
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return (UKNOWN_STATUS_CODE, ['There were no configurations supplied to the script.'])
result_code = None
try:
use_external_hbase = False
if USE_EXTERNAL_HBASE_KEY in configurations:
use_external_hbase = str(configurations[USE_EXTERNAL_HBASE_KEY]).upper() == 'TRUE'
if use_external_hbase:
return (OK_RESULT_CODE, ['use_external_hbase set to true.'])
is_hbase_system_service_launch = False
if ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY in configurations:
is_hbase_system_service_launch = str(configurations[ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY]).upper() == 'TRUE'
yarn_hbase_user = "yarn-ats"
if ATS_HBASE_USER_KEY in configurations:
yarn_hbase_user = configurations[ATS_HBASE_USER_KEY]
if not is_hbase_system_service_launch:
yarn_hbase_pid_dir_prefix = ""
if ATS_HBASE_PID_DIR_PREFIX in configurations:
yarn_hbase_pid_dir_prefix = configurations[ATS_HBASE_PID_DIR_PREFIX]
else:
return (UKNOWN_STATUS_CODE, ['The yarn_hbase_pid_dir_prefix is a required parameter.'])
yarn_hbase_pid_dir = format("{yarn_hbase_pid_dir_prefix}/{yarn_hbase_user}")
master_pid_file = format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-master.pid")
rs_pid_file = format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-regionserver.pid")
if host_name is None:
host_name = socket.getfqdn()
master_process_running = is_monitor_process_live(master_pid_file)
rs_process_running = is_monitor_process_live(rs_pid_file)
alert_state = OK_RESULT_CODE if master_process_running and rs_process_running else CRITICAL_RESULT_CODE
alert_label = 'ATS embedded HBase is running on {0}' if master_process_running and rs_process_running else 'ATS embedded HBase is NOT running on {0}'
alert_label = alert_label.format(host_name)
return (alert_state, [alert_label])
else:
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
if CHECK_COMMAND_TIMEOUT_KEY in parameters:
check_command_timeout = int(parameters[CHECK_COMMAND_TIMEOUT_KEY])
if security_enabled:
if ATS_HBASE_PRINCIPAL_KEY in configurations:
ats_hbase_app_principal = configurations[ATS_HBASE_PRINCIPAL_KEY]
ats_hbase_app_principal = ats_hbase_app_principal.replace('_HOST',host_name.lower())
if ATS_HBASE_PRINCIPAL_KEYTAB_KEY in configurations:
ats_hbase_app_keytab = configurations[ATS_HBASE_PRINCIPAL_KEYTAB_KEY]
# Get the configured Kerberos executable search paths, if any
if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
else:
kerberos_executable_search_paths = None
kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
kinitcmd=format("{kinit_path_local} -kt {ats_hbase_app_keytab} {ats_hbase_app_principal}; ")
# prevent concurrent kinit
kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
kinit_lock.acquire()
try:
Execute(kinitcmd, user=yarn_hbase_user,
path=["/bin/", "/usr/bin/", "/usr/sbin/"],
timeout=10)
finally:
kinit_lock.release()
start_time = time.time()
ats_hbase_status_cmd = STACK_ROOT_DEFAULT + format("/current/hadoop-yarn-client/bin/yarn app -status ats-hbase")
code, output, error = shell.checked_call(ats_hbase_status_cmd, user=yarn_hbase_user, stderr=subprocess.PIPE,
timeout=check_command_timeout,
logoutput=False)
if code != 0:
alert_label = traceback.format_exc()
result_code = UKNOWN_STATUS_CODE
return (result_code, [alert_label])
# Call for getting JSON
ats_hbase_app_info = make_valid_json(output)
if ats_hbase_app_info is None:
alert_label = CRITICAL_MESSAGE
result_code = CRITICAL_RESULT_CODE
return (result_code, [alert_label])
if 'state' not in ats_hbase_app_info:
alert_label = traceback.format_exc()
result_code = UKNOWN_STATUS_CODE
return (result_code, [alert_label])
retrieved_ats_hbase_app_state = ats_hbase_app_info['state'].upper()
if retrieved_ats_hbase_app_state in ['STABLE']:
result_code = OK_RESULT_CODE
total_time = time.time() - start_time
alert_label = OK_MESSAGE.format(retrieved_ats_hbase_app_state, total_time)
else:
result_code = CRITICAL_RESULT_CODE
total_time = time.time() - start_time
alert_label = CRITICAL_MESSAGE_WITH_STATE.format(retrieved_ats_hbase_app_state, total_time)
except:
alert_label = traceback.format_exc()
traceback.format_exc()
result_code = CRITICAL_RESULT_CODE
return (result_code, [alert_label])
def make_valid_json(output):
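"""Extract the single-line JSON payload from the 'yarn app -status' output, returning None when the ats-hbase service is reported as not found."""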
splits = output.split("\n")
ats_hbase_app_info = None
json_element = None # used to detect where the JSON data starts in the output
for idx, split in enumerate(splits):
curr_elem = split.strip()
if curr_elem.startswith( '{' ) and curr_elem.endswith( '}' ):
json_element = curr_elem
break
elif ATS_HBASE_APP_NOT_FOUND_KEY in curr_elem:
return ats_hbase_app_info
# Remove extra logging from possible JSON output
if json_element is None:
raise Fail("Couldn't validate the received output for JSON parsing.")
ats_hbase_app_info = json.loads(json_element)
return ats_hbase_app_info
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def is_monitor_process_live(pid_file):
"""
Gets whether the Metrics Monitor represented by the specified file is running.
:param pid_file: the PID file of the monitor to check
:return: True if the monitor is running, False otherwise
"""
live = False
try:
check_process_status(pid_file)
live = True
except ComponentIsNotRunning:
pass
return live
| 43.081301
| 161
| 0.702208
|
4d20bff7ad9fe4b2d06702afa000a251018e0197
| 389
|
py
|
Python
|
toascii/converter.py
|
Iapetus-11/2ascii
|
7b77c22819f26ab8b8fa85b55fe1c022b0590bb4
|
[
"MIT"
] | 16
|
2020-11-17T14:20:19.000Z
|
2022-03-27T16:25:05.000Z
|
toascii/converter.py
|
Iapetus-11/2ascii
|
7b77c22819f26ab8b8fa85b55fe1c022b0590bb4
|
[
"MIT"
] | null | null | null |
toascii/converter.py
|
Iapetus-11/2ascii
|
7b77c22819f26ab8b8fa85b55fe1c022b0590bb4
|
[
"MIT"
] | 2
|
2021-10-11T21:26:41.000Z
|
2022-03-11T08:44:02.000Z
|
class Converter:
"""Base converter class, contains the asciify() method which is used in all Converter subclasses."""
def asciify(self, image):
for row in image:
for b, g, r in row:
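# Weighted sum approximates perceived brightness (ITU-R BT.709 luma coefficients); rows are unpacked as BGR pixels.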
lumination = 0.2126 * r + 0.7152 * g + 0.0722 * b
yield self.gradient[int((lumination / 255) * (self._gradient_len - 1))]
yield "\n"
| 35.363636
| 104
| 0.562982
|
ed11d9b069bf9cfe2becfeb30cad297b8361c18d
| 844
|
py
|
Python
|
setup.py
|
prokopst/clizy
|
c024be725377c05c3db186f463e18f70dc90b55e
|
[
"Apache-2.0"
] | 31
|
2018-04-20T01:05:47.000Z
|
2018-10-06T15:08:44.000Z
|
setup.py
|
prokopst/clizy
|
c024be725377c05c3db186f463e18f70dc90b55e
|
[
"Apache-2.0"
] | 5
|
2018-04-25T07:47:20.000Z
|
2018-10-16T06:31:32.000Z
|
setup.py
|
getclizy/clizy
|
c024be725377c05c3db186f463e18f70dc90b55e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
if sys.version_info < (3, 6):
raise RuntimeError("Python < 3.6 is not supported!")
with open('docs/README.md') as file:
long_description = file.read()
setup(
name='clizy',
version='0.0.2',
description="Command-line interface creation for lazy people using type hints.",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/prokopst/clizy',
packages=find_packages(exclude=['tests']),
author="Stan Prokop",
license='Apache 2 License',
classifiers=[
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
]
)
| 30.142857
| 84
| 0.674171
|
b2eceee674a60198115c150d9874fcc672135c1b
| 936
|
py
|
Python
|
unittest_reinvent/running_modes/reinforcement_tests/test_margin_guard_mean_stats.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 183
|
2020-04-04T02:01:15.000Z
|
2022-03-30T21:56:56.000Z
|
unittest_reinvent/running_modes/reinforcement_tests/test_margin_guard_mean_stats.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 39
|
2020-04-05T15:19:56.000Z
|
2022-03-09T12:58:21.000Z
|
unittest_reinvent/running_modes/reinforcement_tests/test_margin_guard_mean_stats.py
|
lilleswing/Reinvent-1
|
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
|
[
"Apache-2.0"
] | 70
|
2020-04-05T19:25:43.000Z
|
2022-02-22T12:04:39.000Z
|
import unittest
from unittest.mock import Mock
import torch
import numpy as np
from running_modes.reinforcement_learning.margin_guard import MarginGuard
class MarginGuardMeanStatsTest(unittest.TestCase):
def setUp(self) -> None:
self.runner = Mock()
self.mg = MarginGuard(self.runner)
self.agent_likelihood = torch.tensor([1., 2., 3.])
self.prior_likelihood = torch.tensor([4., 5., 6.])
self.augmented_likelihood = torch.tensor([7., 8., 9.])
self.score = np.array([1., 2., 3])
self.mg.store_run_stats(
self.agent_likelihood,
self.prior_likelihood,
self.augmented_likelihood,
self.score
)
def test_expected_mean(self):
mean_aug_lh = self.mg._get_mean_stats_field("augmented_likelihood")
expected_mean = self.augmented_likelihood.mean().item()
self.assertEqual(mean_aug_lh, expected_mean)
| 32.275862
| 75
| 0.66453
|
9c8f52389ca8af8762fae0edd609a8a97a982485
| 2,644
|
py
|
Python
|
scripts/run_tracking_eval_set.py
|
raphaelchang/omni_slam_eval
|
7df7d76c520c1325ac4f1a85f87b7af07d9628c3
|
[
"MIT"
] | 7
|
2020-06-15T01:04:10.000Z
|
2021-12-15T03:49:05.000Z
|
scripts/run_tracking_eval_set.py
|
raphaelchang/omni_slam_eval
|
7df7d76c520c1325ac4f1a85f87b7af07d9628c3
|
[
"MIT"
] | null | null | null |
scripts/run_tracking_eval_set.py
|
raphaelchang/omni_slam_eval
|
7df7d76c520c1325ac4f1a85f87b7af07d9628c3
|
[
"MIT"
] | 4
|
2020-06-15T16:02:12.000Z
|
2021-10-12T07:18:47.000Z
|
import roslaunch
import os
from parse import parse
import sys
import argparse
parser = argparse.ArgumentParser(description='Run tracking evaluation set')
parser.add_argument('working_dir', help='working directory')
parser.add_argument('--motion', type=str, help='motion type for motion set evaluation')
parser.add_argument("--rate", type=int, help='frame rate multiplier', default=1)
args = parser.parse_args()
parent = roslaunch.parent.ROSLaunchParent("", [], is_core=True)
parent.start()
if os.path.isdir(args.working_dir):
if args.motion is None:
print ''
print '==========================================='
print 'Full motion+FOV dataset tracking evaluation'
print '==========================================='
else:
print ''
print '==========================================='
print '{} motion dataset tracking evaluation'.format(args.motion)
print '==========================================='
fovs = []
for yaml in os.listdir(args.working_dir):
if not os.path.isdir(os.path.join(args.working_dir, yaml)) and yaml.endswith('.yaml'):
fov = os.path.splitext(os.path.basename(yaml))[0]
fovs.append(fov)
fovs.sort(key=int)
for motion in os.listdir(args.working_dir):
if os.path.isdir(os.path.join(args.working_dir, motion)):
if args.motion is not None and motion != args.motion:
continue
bag_dir = os.path.join(args.working_dir, motion)
for fov in fovs:
if args.motion is None:
printstr = "Motion type {}, FOV {}".format(motion, fov)
else:
printstr = "FOV {}".format(fov)
print ''
print '-' * len(printstr)
print printstr
print '-' * len(printstr)
print ''
fov_file = os.path.join(args.working_dir, fov + '.yaml')
for filename in os.listdir(bag_dir):
if filename.endswith('.bag') and not filename.endswith('.orig.bag'):
bag_file = os.path.abspath(os.path.join(bag_dir, filename))
sys.argv = ['roslaunch', 'omni_slam_eval', 'tracking_eval.launch', 'bag_file:={}'.format(bag_file), 'camera_file:={}'.format(fov_file), 'rate:={}'.format(args.rate)]
reload(roslaunch)
roslaunch.main()
print ''
print '==================='
print 'Evaluation complete'
print '==================='
else:
print '[ERROR] Invalid path specified'
parent.shutdown()
| 41.3125
| 189
| 0.538956
|
2f5599ce043dd86c33dd0bd6d53ad0dcd80c1f13
| 600
|
py
|
Python
|
read_parquet.py
|
gkovacs/crypocurrency-trading
|
74edbea2a3c3585b7df976bbe5bb0bab911cd030
|
[
"MIT"
] | 5
|
2018-05-02T19:41:55.000Z
|
2019-06-24T12:04:12.000Z
|
read_parquet.py
|
gkovacs/crypocurrency-trading
|
74edbea2a3c3585b7df976bbe5bb0bab911cd030
|
[
"MIT"
] | null | null | null |
read_parquet.py
|
gkovacs/crypocurrency-trading
|
74edbea2a3c3585b7df976bbe5bb0bab911cd030
|
[
"MIT"
] | 1
|
2018-11-15T16:25:22.000Z
|
2018-11-15T16:25:22.000Z
|
#!/usr/bin/env python3
import paratext
import pandas
import lz4.frame
import gzip
import io
import pyarrow.parquet as pq
import pyarrow as pa
'''
filepath = 'cboe/lz4_test/BTCUSD_order_book_20170627.csv.lz4'
#filepath = 'cboe/lz4_test/BTCUSD_order_book_20170627.csv.gz'
df = pandas.read_csv(io.TextIOWrapper(lz4.frame.open(filepath)))
#df = pandas.read_csv(filepath)
#df = paratext.load_csv_to_pandas(gzip.open(filepath).read())
print((df))
'''
from glob import glob
from plumbum.cmd import rm
for x in glob('cboe/parquet/*.parquet'):
print(x)
table = pq.read_table(x, columns=["Event ID"])
| 22.222222
| 64
| 0.756667
|
ebcee15ed226b37f75a0283649de27c10c8c0ecb
| 37,793
|
py
|
Python
|
despasito/equations_of_state/saft/gamma_sw.py
|
nikita-bykov/despasito
|
484ccd8d013bf83de79a28c605d0bdced90ca391
|
[
"BSD-3-Clause"
] | 3
|
2020-09-28T21:06:56.000Z
|
2022-02-06T20:21:00.000Z
|
despasito/equations_of_state/saft/gamma_sw.py
|
nikita-bykov/despasito
|
484ccd8d013bf83de79a28c605d0bdced90ca391
|
[
"BSD-3-Clause"
] | 20
|
2019-09-20T15:44:45.000Z
|
2021-04-05T18:11:20.000Z
|
despasito/equations_of_state/saft/gamma_sw.py
|
nikita-bykov/despasito
|
484ccd8d013bf83de79a28c605d0bdced90ca391
|
[
"BSD-3-Clause"
] | 6
|
2020-04-07T23:35:37.000Z
|
2021-11-18T13:17:24.000Z
|
# -- coding: utf8 --
r"""
EOS object for SAFT-:math:`\gamma`-SW
Equations referenced in this code are from Lymperiadis, A. et. al, J. Chem. Phys. 127, 234903 (2007)
"""
import numpy as np
import logging
import despasito.equations_of_state.eos_toolbox as tb
from despasito.equations_of_state import constants
import despasito.equations_of_state.saft.saft_toolbox as stb
from despasito.equations_of_state.saft import Aassoc
logger = logging.getLogger(__name__)
ckl_coef = np.array(
[
[2.25855, -1.50349, 0.249434],
[-0.669270, 1.40049, -0.827739],
[10.1576, -15.0427, 5.30827],
]
)
class SaftType:
r"""
Object of SAFT-𝛾-SW (for square well potential)
Parameters
----------
beads : list[str]
List of unique bead names used among components
molecular_composition : numpy.ndarray
:math:`\nu_{i,k}/k_B`. Array containing the number of components by the number of bead types. Defines the number of each type of group in each component.
bead_library : dict
A dictionary where bead names are the keys to access EOS self interaction parameters:
- epsilon: :math:`\epsilon_{k,k}/k_B`, Energy well depth scaled by Boltzmann constant
- sigma: :math:`\sigma_{k,k}`, Size parameter, contact distance [nm]
- lambda: :math:`\lambda_{k,k}`, Range of the attractive interaction of well depth, epsilon
- Sk: Optional, default=1, Shape factor, reflects the proportion which a given segment contributes to the total free energy
- Vks: Optional, default=1, Number of segments in this molecular group
cross_library : dict, Optional, default={}
Optional library of bead cross interaction parameters. As many or as few of the desired parameters may be defined for whichever group combinations are desired. If this matrix isn't provided, the SAFT ``combining_rules`` are used.
- epsilon: :math:`\epsilon_{k,l}/k_B`, Energy parameter, well depth, scaled by Boltzmann Constant
- sigma: :math:`\sigma_{k,k}`, Size parameter, contact distance [nm]
- lambda: :math:`\lambda_{k,l}`, Range of the attractive interaction of well depth, epsilon
num_rings : list
Number of rings in each molecule. This will impact the chain contribution to the Helmholtz energy.
Attributes
----------
beads : list[str]
List of unique bead names used among components
bead_library : dict
A dictionary where bead names are the keys to access EOS self interaction parameters. See entry in **Parameters** section.
cross_library : dict
Library of bead cross interaction parameters. As many or as few of the desired parameters may be defined for whichever group combinations are desired. See entry in **Parameters** section.
Aideal_method : str
"Abroglie" the default functional form of the ideal gas contribution of the Helmholtz energy
residual_helmholtz_contributions : list[str]
List of methods from the specified saft_source representing contributions to the Helmholtz energy that are functions of density, temperature, and composition. For this variant, [``Amonomer``, ``Achain``]
parameter_types : list[str]
This list of parameter names, "epsilon", "lambda", "sigma", and/or "Sk" as well as parameters for the specific SAFT variant.
parameter_bound_extreme : dict
With each parameter name as an entry representing a list with the minimum and maximum feasible parameter value.
- epsilon: [10.,1000.]
- lambda: [1.0,10.]
- sigma: [0.1,10.0]
- Sk: [0.1,1.0]
combining_rules : dict
Contains functional form and additional information for calculating cross interaction parameters that are not found in ``cross_library``. Function must be one of those contained in :mod:`~despasito.equations_of_state.combining_rule_types`. The default values are:
- sigma: {"function": "mean"}
- lambda: {"function": "weighted_mean","weighting_parameters": ["sigma"]}
- epsilon: {"function": "square_well_berthelot","weighting_parameters": ["sigma", "lambda"]}
eos_dict : dict
Dictionary of parameters and specific settings
- molecular_composition (numpy.ndarray) - :math:`\nu_{i,k}/k_B`. Array containing the number of components by the number of bead types. Defines the number of each type of group in each component.
- num_rings (list) - Number of rings in each molecule. This will impact the chain contribution to the Helmholtz energy.
- Sk (numpy.ndarray) - Shape factor, reflects the proportion which a given segment contributes to the total free energy. Length of ``beads`` array.
- Vks (numpy.ndarray) - Number of segments in this molecular group. Length of ``beads`` array.
- epsilon_kl (numpy.ndarray) - Matrix of well depths for groups (k,l)
- sigma_kl (numpy.ndarray) - Matrix of bead diameters (k,l)
- lambda_kl (numpy.ndarray) - Matrix of range of potential well depth (k,l)
- Cmol2seg (float) - Conversion factor from from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`.
- epsilon_ij (numpy.ndarray) - Matrix of average molecular well depths (k,l)
- sigma_ij (numpy.ndarray) - Matrix of average molecular diameter (k,l)
- lambda_ij (numpy.ndarray) - Matrix of average molecular range of potential well depth (k,l)
- xskl (numpy.ndarray) - Matrix of mole fractions of bead (i.e. segment or group) k multiplied by that of bead l
- alphakl (np.array) - van der Waals attractive parameter for square-well segments, equal to :math:`\alpha_{k,l}/k_B`.
ncomp : int
Number of components in the system
nbeads : int
Number of beads in system that are shared among components
xi : numpy.ndarray
Mole fraction of each molecule in mixture. Default initialization is np.nan
"""
def __init__(self, **kwargs):
if "method_stat" in kwargs:
self.method_stat = kwargs["method_stat"]
del kwargs["method_stat"]
logger.info("This EOS doesn't use compiled modules for Amonomer and Achain")
else:
self.method_stat = None
self.Aideal_method = "Abroglie"
self.residual_helmholtz_contributions = ["Amonomer", "Achain"]
self.parameter_types = ["epsilon", "lambda", "sigma", "Sk"]
self._parameter_defaults = {
"epsilon": None,
"lambda": None,
"sigma": None,
"Sk": 1.0,
"Vks": 1.0,
}
self.parameter_bound_extreme = {
"epsilon": [10.0, 1000.0],
"lambda": [1.0, 10.0],
"sigma": [0.1, 10.0],
"Sk": [0.1, 1.0],
}
self.combining_rules = {
"sigma": {"function": "mean"},
"lambda": {"function": "weighted_mean", "weighting_parameters": ["sigma"]},
"epsilon": {
"function": "square_well_berthelot",
"weighting_parameters": ["sigma", "lambda"],
},
} # Note in this EOS object, the combining rules for the group parameters are also used for their corresponding molecular averaged parameters.
if not hasattr(self, "eos_dict"):
self.eos_dict = {}
needed_attributes = ["molecular_composition", "beads", "bead_library"]
for key in needed_attributes:
if key not in kwargs:
raise ValueError(
"One of the following inputs is missing: {}".format(
", ".join(needed_attributes)
)
)
elif key == "molecular_composition":
if "molecular_composition" not in self.eos_dict:
self.eos_dict[key] = kwargs[key]
elif not hasattr(self, key):
setattr(self, key, kwargs[key])
self.bead_library = tb.check_bead_parameters(
self.bead_library, self._parameter_defaults
)
if "cross_library" not in kwargs:
self.cross_library = {}
else:
self.cross_library = kwargs["cross_library"]
if "Vks" not in self.eos_dict:
self.eos_dict["Vks"] = tb.extract_property(
"Vks", self.bead_library, self.beads, default=1.0
)
if "Sk" not in self.eos_dict:
self.eos_dict["Sk"] = tb.extract_property(
"Sk", self.bead_library, self.beads, default=1.0
)
# Initialize component attribute
if not hasattr(self, "xi"):
self.xi = np.nan
if not hasattr(self, "nbeads") or not hasattr(self, "ncomp"):
self.ncomp, self.nbeads = np.shape(self.eos_dict["molecular_composition"])
# Initiate cross interaction terms
output = tb.cross_interaction_from_dict(
self.beads,
self.bead_library,
self.combining_rules,
cross_library=self.cross_library,
)
self.eos_dict["sigma_kl"] = output["sigma"]
self.eos_dict["epsilon_kl"] = output["epsilon"]
self.eos_dict["lambda_kl"] = output["lambda"]
if "num_rings" in kwargs:
self.eos_dict["num_rings"] = kwargs["num_rings"]
logger.info(
"Accepted component ring structure: {}".format(kwargs["num_rings"])
)
else:
self.eos_dict["num_rings"] = np.zeros(
len(self.eos_dict["molecular_composition"])
)
# Initiate average interaction terms
self.calc_component_averaged_properties()
self.alphakl = (
2.0
* np.pi
/ 3.0
* self.eos_dict["epsilon_kl"]
* self.eos_dict["sigma_kl"] ** 3
* (self.eos_dict["lambda_kl"] ** 3 - 1.0)
)
def calc_component_averaged_properties(self):
r"""
Calculate component averaged properties specific to SAFT-𝛾-SW
Attributes
----------
eos_dict : dict
Dictionary of outputs, the following possibilities are calculated if all relevant beads have those properties.
- epsilon_ij (numpy.ndarray) - Matrix of average molecular well depths (k,l)
- sigma_ij (numpy.ndarray) - Matrix of average molecular diameter (k,l)
- lambda_ij (numpy.ndarray) - Matrix of average molecular range of potential well depth (k,l)
"""
ncomp, nbeads = np.shape(self.eos_dict["molecular_composition"])
zki = np.zeros((ncomp, nbeads), float)
zkinorm = np.zeros(ncomp, float)
epsilonii = np.zeros(ncomp, float)
sigmaii = np.zeros(ncomp, float)
lambdaii = np.zeros(ncomp, float)
# compute zki
for i in range(ncomp):
for k in range(nbeads):
zki[i, k] = (
self.eos_dict["molecular_composition"][i, k]
* self.eos_dict["Vks"][k]
* self.eos_dict["Sk"][k]
)
zkinorm[i] += zki[i, k]
for i in range(ncomp):
for k in range(nbeads):
zki[i, k] = zki[i, k] / zkinorm[i]
for i in range(ncomp):
for k in range(nbeads):
sigmaii[i] += zki[i, k] * self.eos_dict["sigma_kl"][k, k] ** 3
for l in range(nbeads):
epsilonii[i] += (
zki[i, k] * zki[i, l] * self.eos_dict["epsilon_kl"][k, l]
)
lambdaii[i] += (
zki[i, k] * zki[i, l] * self.eos_dict["lambda_kl"][k, l]
)
sigmaii[i] = sigmaii[i] ** (1.0 / 3.0)
input_dict = {"sigma": sigmaii, "lambda": lambdaii, "epsilon": epsilonii}
dummy_dict, dummy_labels = tb.construct_dummy_bead_library(input_dict)
output_dict = tb.cross_interaction_from_dict(
dummy_labels, dummy_dict, self.combining_rules
)
self.eos_dict["sigma_ij"] = output_dict["sigma"]
self.eos_dict["lambda_ij"] = output_dict["lambda"]
self.eos_dict["epsilon_ij"] = output_dict["epsilon"]
def reduced_density(self, rho, xi):
r"""
Reduced density matrix where the segment number density is reduced by powers of the size parameter, sigma.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
zeta : numpy.ndarray
Reduced density matrix of length 4, with varying degrees of dependence on sigma. Units: [molecules/nm^3, molecules/nm^2, molecules/nm, molecules]
"""
self._check_density(rho)
self._check_composition_dependent_parameters(xi)
rho2 = rho * constants.molecule_per_nm3 * self.eos_dict["Cmol2seg"]
reduced_density = np.zeros((np.size(rho), 4))
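# zeta_m = (pi/6) * rho_s * sum_k x_{s,k} * sigma_{k,k}**m for m = 0..3; the square root of the diagonal of xskl recovers the segment fractions x_{s,k}.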
for m in range(4):
reduced_density[:, m] = rho2 * (
np.sum(
np.sqrt(np.diag(self.eos_dict["xskl"]))
* (np.diag(self.eos_dict["sigma_kl"]) ** m)
)
* (np.pi / 6.0)
)
return reduced_density
def effective_packing_fraction(self, rho, xi, zetax=None, mode="normal"):
r"""
Effective packing fraction for SAFT-gamma with a square-wave potential
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
mode : str, Optional, default="normal"
This indicates whether group or effective component parameters are used. Options include: "normal" and "effective"
Returns
-------
zeta_eff : numpy.ndarray
Effective packing fraction (len(rho), Nbeads, Nbeads)
"""
self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if mode == "normal":
lambdakl = self.eos_dict["lambda_kl"]
elif mode == "effective":
lambdakl = self.eos_dict["lambda_ij"]
lx = len(lambdakl) # lx is nbeads for normal and ncomp for effective
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
zetax_pow = np.zeros((np.size(rho), 3))
zetax_pow[:, 0] = zetax
for i in range(1, 3):
zetax_pow[:, i] = zetax_pow[:, i - 1] * zetax_pow[:, 0]
zetakl = np.zeros((np.size(rho), lx, lx))
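# zeta_eff(k,l) = c1*zetax + c2*zetax**2 + c3*zetax**3, with (c1, c2, c3) = ckl_coef . (1, lambda_kl, lambda_kl**2)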
for k in range(lx):
for l in range(lx):
if lambdakl[k, l] != 0.0:
cikl = np.dot(
ckl_coef,
np.array(
(1.0, lambdakl[k, l], lambdakl[k, l] ** 2),
dtype=ckl_coef.dtype,
),
)
zetakl[:, k, l] = np.dot(zetax_pow, cikl)
return zetakl
def _dzetaeff_dzetax(self, rho, xi, zetax=None, mode="normal"):
r"""
Derivative of effective packing fraction with respect to the reduced density. Eq. 33
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
mode : str, Optional, default="normal"
This indicates whether group or effective component parameters are used. Options include: "normal" and "effective"
Returns
-------
dzetakl : numpy.ndarray
Derivative of effective packing fraction (len(rho), Nbeads, Nbeads) with respect to the reduced density
"""
self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if mode == "normal":
lambdakl = self.eos_dict["lambda_kl"]
elif mode == "effective":
lambdakl = self.eos_dict["lambda_ij"]
lx = len(lambdakl) # lx is nbeads for normal and ncomp for effective
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
zetax_pow = np.transpose(
np.array([np.ones(len(rho)), 2 * zetax, 3 * zetax ** 2])
)
# check if you have more than 1 bead types
dzetakl = np.zeros((np.size(rho), lx, lx))
for k in range(lx):
for l in range(lx):
if lambdakl[k, l] != 0.0:
cikl = np.dot(
ckl_coef,
np.array(
(1.0, lambdakl[k, l], lambdakl[k, l] ** 2),
dtype=ckl_coef.dtype,
),
)
dzetakl[:, k, l] = np.dot(zetax_pow, cikl)
return dzetakl
def Ahard_sphere(self, rho, T, xi):
r"""
Outputs hard sphere approximation of Helmholtz free energy, :math:`A^{HS}/Nk_{b}T`.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
Ahard_sphere : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
zeta = self.reduced_density(rho, xi)
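# Hard-sphere mixture free energy written in terms of the reduced densities zeta_0..zeta_3 (Boublik/Mansoori-type expression).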
tmp = 6.0 / (np.pi * rho * constants.molecule_per_nm3)
tmp1 = np.log1p(-zeta[:, 3]) * (
zeta[:, 2] ** 3 / (zeta[:, 3] ** 2) - zeta[:, 0]
)
tmp2 = 3.0 * zeta[:, 2] / (1 - zeta[:, 3]) * zeta[:, 1]
tmp3 = zeta[:, 2] ** 3 / (zeta[:, 3] * ((1.0 - zeta[:, 3]) ** 2))
AHS = tmp * (tmp1 + tmp2 + tmp3)
return AHS
def Afirst_order(self, rho, T, xi, zetax=None):
r"""
Outputs :math:`A^{1st order}/Nk_{b}T`. This is the first order term in the high-temperature perturbation expansion
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
Afirst_order : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
g0HS = self.calc_g0HS(rho, xi, zetax=zetax)
a1kl_tmp = np.tensordot(
rho * constants.molecule_per_nm3, self.eos_dict["xskl"] * self.alphakl, 0
)
A1 = -(self.eos_dict["Cmol2seg"] ** 2 / T) * np.sum(
a1kl_tmp * g0HS, axis=(1, 2)
) # Units of K
return A1
def Asecond_order(self, rho, T, xi, zetax=None, KHS=None):
r"""
Outputs :math:`A^{2nd order}/Nk_{b}T`. This is the second order term in the high-temperature perturbation expansion
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
KHS : numpy.ndarray, Optional, default=None
(length of densities) isothermal compressibility of system with packing fraction zetax
Returns
-------
Asecond_order : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
# Note that zetax = zeta3
if KHS is None:
KHS = stb.calc_KHS(zetax)
dzetakl = self._dzetaeff_dzetax(rho, xi, zetax=zetax)
zeta_eff = self.effective_packing_fraction(rho, xi, zetax=zetax)
g0HS = self.calc_g0HS(rho, xi, zetax=zetax)
rho2 = self.eos_dict["Cmol2seg"] * rho * constants.molecule_per_nm3
tmp1 = KHS * rho2 / 2.0
tmp2 = self.eos_dict["epsilon_kl"] * self.alphakl * self.eos_dict["xskl"]
a2kl_tmp = np.tensordot(tmp1, tmp2, 0)
a2 = a2kl_tmp * (
g0HS
+ zetax[:, np.newaxis, np.newaxis]
* dzetakl
* (2.5 - zeta_eff)
/ (1 - zeta_eff) ** 4
)
# Lymperiadis 2007 has a disconnect where Eq. 24 != Eq. 30, as Eq. 24 is missing a minus sign. (Same in Lymperiadis 2008 for Eq. 32 and Eq. 38)
A2 = -(self.eos_dict["Cmol2seg"] / (T ** 2)) * np.sum(a2, axis=(1, 2))
return A2
def Amonomer(self, rho, T, xi):
r"""
Outputs the monomer contribution of the Helmholtz energy :math:`A^{mono.}/Nk_{b}T`.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
Amonomer : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
if np.all(rho > self.density_max(xi, T)):
raise ValueError(
"Density values should not all be greater than {}, or calc_Amono will fail in log calculation.".format(
self.density_max(xi, T)
)
)
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
zetax = self.reduced_density(rho, xi)[:, 3]
Amonomer = (
self.Ahard_sphere(rho, T, xi)
+ self.Afirst_order(rho, T, xi, zetax=zetax)
+ self.Asecond_order(rho, T, xi, zetax=zetax)
)
return Amonomer
def calc_g0HS(self, rho, xi, zetax=None, mode="normal"):
r"""
The contact value of the pair correlation function of a hypothetical pure fluid of diameter sigmax evaluated at an effective packing fraction, zeta_eff.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
mode : str, Optional, default="normal"
This indicates whether group or effective component parameters are used. Options include: "normal" and "effective", where "normal" uses bead interaction matrices and "effective" uses component-averaged parameters.
Returns
-------
g0HS : numpy.ndarray
The contact value of the pair correlation function of a hypothetical pure fluid
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
zeta_eff = self.effective_packing_fraction(rho, xi, mode=mode, zetax=zetax)
g0HS = (1.0 - zeta_eff / 2.0) / (1.0 - zeta_eff) ** 3
return g0HS
def calc_gHS(self, rho, xi):
r"""
Hypothetical pair correlation function of a hypothetical pure fluid.
This fluid is of diameter sigmax evaluated at contact and effective packing fraction zeta_eff.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
gHS : numpy.ndarray
Hypothetical pair correlation function of a hypothetical pure fluid of diameter sigmax evaluated at contact and effective packing fraction zeta_eff.
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
zetam = self.reduced_density(rho, xi)
tmp1 = 1.0 / (1.0 - zetam[:, 3])
tmp2 = zetam[:, 2] / (1.0 - zetam[:, 3]) ** 2
tmp3 = zetam[:, 2] ** 2 / (1.0 - zetam[:, 3]) ** 3
gHS = np.zeros((np.size(rho), self.ncomp, self.ncomp))
for i in range(self.ncomp):
for j in range(self.ncomp):
tmp = (
self.eos_dict["sigma_ij"][i, i]
* self.eos_dict["sigma_ij"][j, j]
/ (
self.eos_dict["sigma_ij"][i, i]
+ self.eos_dict["sigma_ij"][j, j]
)
)
gHS[:, i, j] = tmp1 + 3 * tmp * tmp2 + 2 * tmp ** 2 * tmp3
return gHS
def calc_gSW(self, rho, T, xi, zetax=None):
r"""
Calculate the square-well pair correlation function at the effective contact distance and the actual packing fraction of the mixture.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
gSW : numpy.ndarray
Square-well pair correlation function at the effective contact distance and the actual packing fraction of the mixture.
"""
rho = self._check_density(rho)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = self.reduced_density(rho, xi)[:, 3]
g0HS = self.calc_g0HS(rho, xi, zetax=zetax, mode="effective")
gHS = self.calc_gHS(rho, xi)
zeta_eff = self.effective_packing_fraction(
rho, xi, mode="effective", zetax=zetax
)
dg0HSdzetaeff = (2.5 - zeta_eff) / (1.0 - zeta_eff) ** 4
ncomp = len(xi)
dckl_coef = np.array(
[[-1.50349, 0.249434], [1.40049, -0.827739], [-15.0427, 5.30827]]
)
zetax_pow = np.transpose(np.array([zetax, zetax ** 2, zetax ** 3]))
dzetaijdlambda = np.zeros((np.size(rho), ncomp, ncomp))
for i in range(ncomp):
for j in range(ncomp):
cikl = np.dot(
dckl_coef, np.array([1.0, (2 * self.eos_dict["lambda_ij"][i, j])])
)
dzetaijdlambda[:, i, j] = np.dot(zetax_pow, cikl)
dzetaijdzetax = self._dzetaeff_dzetax(rho, xi, zetax=zetax, mode="effective")
dzetaeff = (
self.eos_dict["lambda_ij"][np.newaxis, :, :] / 3.0 * dzetaijdlambda
- zetax[:, np.newaxis, np.newaxis] * dzetaijdzetax
)
gSW = gHS + self.eos_dict["epsilon_ij"][np.newaxis, :, :] / T * (
g0HS
+ (self.eos_dict["lambda_ij"][np.newaxis, :, :] ** 3 - 1.0)
* dg0HSdzetaeff
* dzetaeff
)
return gSW
def Achain(self, rho, T, xi):
r"""
Outputs chain contribution to the Helmholtz energy :math:`A^{chain}/Nk_{b}T`.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
Achain : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
rho = self._check_density(rho)
gii = self.calc_gSW(rho, T, xi)
Achain = 0.0
for i in range(self.ncomp):
beadsum = -1.0 + self.eos_dict["num_rings"][i]
for k in range(self.nbeads):
beadsum += (
self.eos_dict["molecular_composition"][i, k]
* self.eos_dict["Vks"][k]
* self.eos_dict["Sk"][k]
)
Achain -= xi[i] * beadsum * np.log(gii[:, i, i])
if np.any(np.isnan(Achain)):
logger.error("Some Helmholtz values are NaN, check energy parameters.")
return Achain
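    # Sketch of what the loop above evaluates (symbols as stored in eos_dict; nothing new introduced):
    #   A_chain/(N*kB*T) = - sum_i x_i * ( sum_k nu_ik * Vks_k * Sk_k - 1 + num_rings_i ) * ln g_SW_ii
    # where nu_ik is molecular_composition[i, k] and g_SW_ii is the square-well pair
    # correlation function at contact returned by calc_gSW.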
def density_max(self, xi, T, maxpack=0.65):
"""
Estimate the maximum density based on the hard sphere packing fraction.
Parameters
----------
xi : list[float]
Mole fraction of each component
T : float
Temperature of the system [K]
maxpack : float, Optional, default=0.65
Maximum packing fraction
Returns
-------
max_density : float
Maximum molar density [:math:`mol/m^3`]
"""
self._check_composition_dependent_parameters(xi)
# estimate the maximum density based on the hard sphere packing fraction
# etax, assuming a maximum packing fraction specified by maxpack
max_density = (
maxpack
* 6.0
/ (
self.eos_dict["Cmol2seg"]
* np.pi
* np.sum(self.eos_dict["xskl"] * (self.eos_dict["sigma_kl"] ** 3))
)
/ constants.molecule_per_nm3
)
return max_density
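    # Sketch of the relation implemented above (symbols as stored in eos_dict; nothing new introduced):
    #   max_density = 6 * maxpack / ( pi * Cmol2seg * sum_{k,l} xskl[k,l] * sigma_kl[k,l]**3 )
    #                 / constants.molecule_per_nm3
    # i.e. the hard-sphere packing fraction expression inverted at zetax = maxpack and
    # converted to a molar density.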
def calc_gr_assoc(self, rho, T, xi, Ktype="ijklab"):
r"""
Reference fluid pair correlation function used in calculating association sites
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Ktype : str, Optional, default='ijklab'
Indicates which radial distribution function to return. The only option is 'ijklab': the bonding volume was calculated from self.calc_Kijklab, so gHS_dij is returned.
Returns
-------
gr : numpy.ndarray
A temperature-density polynomial correlation of the association integral for a Lennard-Jones monomer. This matrix is (len(rho) x Ncomp x Ncomp)
"""
rho = self._check_density(rho)
gSW = self.calc_gSW(rho, T, xi)
return gSW
def calc_Kijklab(self, T, rc_klab, rd_klab=None, reduction_ratio=0.25):
r"""
Calculation of association site bonding volume, dependent on molecule in addition to group
Lymperiadis Fluid Phase Equilibria 274 (2008) 85–104
Parameters
----------
T : float
Temperature of the system [K]. Not used in this version of SAFT, but included so that saft.py can stay general.
rc_klab : numpy.ndarray
Matrix of cutoff distances for association sites, for each site type in each group type
rd_klab : numpy.ndarray, Optional, default=None
Position of association site in each group (nbead, nbead, nsite, nsite)
reduction_ratio : float, Optional, default=0.25
Reduced distance of the sites from the center of the sphere of interaction. This value is used when site position, rd_klab is None
Returns
-------
Kijklab : numpy.ndarray
Matrix of binding volumes
"""
dij_bar = np.zeros((self.ncomp, self.ncomp))
for i in range(self.ncomp):
for j in range(self.ncomp):
dij_bar[i, j] = np.mean(
[self.eos_dict["sigma_ij"][i], self.eos_dict["sigma_ij"][j]]
)
Kijklab = Aassoc.calc_bonding_volume(
rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio
)
return Kijklab
def parameter_refresh(self, bead_library, cross_library):
r"""
To refresh dependent parameters
Parameters that depend on the bead_library and cross_library attributes **must** be updated by running this function after any parameters have been changed with the update_parameters method.
Attributes
----------
alpha : np.array
van der Waals attractive parameter for square-well segments, equal to :math:`\alpha_{k,l}/k_B`.
eos_dict : dict
The following entries are updated:
- epsilon_kl (numpy.ndarray) - Matrix of well depths for groups (k,l)
- sigma_kl (numpy.ndarray) - Matrix of bead diameters (k,l)
- lambda_kl (numpy.ndarray) - Matrix of range of potential well depth (k,l)
- xskl (numpy.ndarray) - Matrix of mole fractions of bead (i.e. segment or group) k multiplied by that of bead l
- Cmol2seg (float) - Conversion factor from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`.
"""
self.bead_library.update(bead_library)
self.cross_library.update(cross_library)
self.eos_dict["Sk"] = tb.extract_property(
"Sk", self.bead_library, self.beads, default=1.0
)
# Update Non bonded matrices
output = tb.cross_interaction_from_dict(
self.beads,
self.bead_library,
self.combining_rules,
cross_library=self.cross_library,
)
self.eos_dict["sigma_kl"] = output["sigma"]
self.eos_dict["epsilon_kl"] = output["epsilon"]
self.eos_dict["lambda_kl"] = output["lambda"]
self.calc_component_averaged_properties()
if not np.any(np.isnan(self.xi)):
self.eos_dict["Cmol2seg"], self.eos_dict[
"xskl"
] = stb.calc_composition_dependent_variables(
self.xi,
self.eos_dict["molecular_composition"],
self.bead_library,
self.beads,
)
self.alphakl = (
2.0
* np.pi
/ 3.0
* self.eos_dict["epsilon_kl"]
* self.eos_dict["sigma_kl"] ** 3
* (self.eos_dict["lambda_kl"] ** 3 - 1.0)
)
def _check_density(self, rho):
r"""
This function checks that the density array is in the correct format for further calculations.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
Returns
-------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
"""
if np.isscalar(rho):
rho = np.array([rho])
elif type(rho) != np.ndarray:
rho = np.array(rho)
if len(np.shape(rho)) == 2:
rho = rho[0]
if any(np.isnan(rho)):
raise ValueError("NaN was given as a value of density, rho")
elif rho.size == 0:
raise ValueError("No value of density was given")
elif any(rho < 0.0):
raise ValueError("Density values cannot be negative.")
return rho
def _check_composition_dependent_parameters(self, xi):
r"""
This function updates composition dependent variables
Parameters
----------
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Attributes
----------
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
eos_dict : dict
The following entries are updated:
- xskl (numpy.ndarray) - Matrix of mole fractions of bead (i.e. segment or group) k multiplied by that of bead l
- Cmol2seg (float) - Conversion factor from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`.
"""
xi = np.array(xi)
if not np.all(self.xi == xi):
self.eos_dict["Cmol2seg"], self.eos_dict[
"xskl"
] = stb.calc_composition_dependent_variables(
xi,
self.eos_dict["molecular_composition"],
self.bead_library,
self.beads,
)
self.xi = xi
def __str__(self):
string = "Beads: {}".format(self.beads)
return string
| 38.329615
| 271
| 0.570741
|
2aeba7e503587515f45dd368c4496fa8915beaf8
| 743
|
py
|
Python
|
python/variable.py
|
itzsoumyadip/vs
|
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
|
[
"Unlicense"
] | 1
|
2019-07-05T04:27:05.000Z
|
2019-07-05T04:27:05.000Z
|
python/variable.py
|
itzsoumyadip/vs
|
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
|
[
"Unlicense"
] | null | null | null |
python/variable.py
|
itzsoumyadip/vs
|
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
|
[
"Unlicense"
] | null | null | null |
## IN PYTHON, A CONSTANT VARIABLE CANNOT BE CREATED
num =5
num2 = num
print(id(num)) # returns the memory address of the variable
print(id(num) ,id(num2)) # both num and num2 point to the same data and address; this is why Python is more memory efficient
print(id(5)) # the address is not based on the variable name, it is based on the box (object) itself
k=5
print(id(k)) # indirectly k refers to the same box holding 5, so its id will be the same as 5's id
# num
# |
# V
# -----
# num2---> | 5 |
# ---
# ^
# |
# K
print(type(num)) # returns the type of the variable, e.g. int, float
name="SAHA"
print(id(name))
print(name)
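# A short sketch extending the lesson above; the names x, y, lst_a, lst_b are only illustrative.
x = 5
y = 5
print(x is y)          # True in CPython: small ints (-5..256) are cached, so both names share one object
print(id(x) == id(y))  # equivalent identity check using id()
lst_a = [1, 2]
lst_b = [1, 2]
print(lst_a == lst_b)  # True: the two lists have equal values
print(lst_a is lst_b)  # False: they are two distinct objects with different ids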
| 19.051282
| 135
| 0.546433
|
920e621975c812f0d89afae659fc1f7db89e2c65
| 819
|
py
|
Python
|
src/feature/utils.py
|
0shimax/Easy-Over-Complete-Distribution
|
3a646bdb819d961cd77725eb31bbaa47ee2a0be9
|
[
"MIT"
] | 5
|
2020-10-16T03:07:05.000Z
|
2022-01-03T07:52:18.000Z
|
src/feature/utils.py
|
0shimax/Easy-Over-Complete-Distribution
|
3a646bdb819d961cd77725eb31bbaa47ee2a0be9
|
[
"MIT"
] | 1
|
2021-01-20T02:29:51.000Z
|
2021-01-20T02:29:51.000Z
|
src/feature/utils.py
|
0shimax/Easy-Over-Complete-Distribution
|
3a646bdb819d961cd77725eb31bbaa47ee2a0be9
|
[
"MIT"
] | 1
|
2021-12-15T12:37:28.000Z
|
2021-12-15T12:37:28.000Z
|
import io
from torchvision import transforms
import torch
from pathlib import Path
from PIL import Image
class ImageTransform(object):
def __init__(self):
pass
def __call__(self, x):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = transforms.Compose(
# [transforms.Resize(256),
# transforms.CenterCrop(224),
[transforms.Resize(76),
transforms.CenterCrop(64),
transforms.ToTensor(),
normalize,
])
return transform(x)
def one_hot(labels, class_size):
targets = torch.zeros(labels.size(0), class_size)
for i, label in enumerate(labels):
targets[i, label] = 1
return targets
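# Hedged usage sketch of one_hot (the label values below are made up for illustration):
#   labels = torch.tensor([0, 2, 1])
#   one_hot(labels, class_size=4)
#   -> tensor([[1., 0., 0., 0.],
#              [0., 0., 1., 0.],
#              [0., 1., 0., 0.]])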
| 26.419355
| 68
| 0.581197
|
cc4438463f39ed8c523ce6f53519b4b16bf07a9d
| 893
|
py
|
Python
|
vtkplotter_examples/basic/lorenz.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | 4
|
2020-07-30T02:38:29.000Z
|
2021-09-12T14:30:18.000Z
|
vtkplotter_examples/basic/lorenz.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | null | null | null |
vtkplotter_examples/basic/lorenz.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | null | null | null |
import numpy as np
dt = 0.002
y = (25.0, -10.0, -7.0) # Starting point (initial condition)
pts, cols = [], []
for t in np.linspace(0, 20, int(20 / dt)):
# Integrate a funny differential equation
dydt = np.array(
[-8 / 3.0 * y[0] + y[1] * y[2],
-10.0 * (y[1] - y[2]),
-y[1] * y[0] + 28.0 * y[1] - y[2]]
)
y = y + dydt * dt
c = np.clip([np.linalg.norm(dydt) * 0.005], 0, 1)[0] # color by speed
cols.append([c, 0, 1-c])
pts.append(y)
from vtkplotter import Plotter, Line, Point, Points, settings
settings.renderPointsAsSpheres = False # render points as squares
scene = Plotter(title="Lorenz attractor", axes=1, verbose=0)
scene += Point(y, r=10, c="g") # end point
scene += Points(pts, r=3, c=cols)
scene += Line(pts).off().addShadow(x=3) # only show shadow, not line
scene += Line(pts).off().addShadow(z=-30)
scene.show(viewup='z')
| 29.766667
| 74
| 0.586786
|
656734fa6739f4ddf8870d24f93442e61ab8fb64
| 9,892
|
py
|
Python
|
boto/sqs/message.py
|
Yurzs/boto
|
d739d6c52877699206e69b9901bbe92ea437ba5d
|
[
"MIT"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
boto/sqs/message.py
|
Yurzs/boto
|
d739d6c52877699206e69b9901bbe92ea437ba5d
|
[
"MIT"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
boto/sqs/message.py
|
Yurzs/boto
|
d739d6c52877699206e69b9901bbe92ea437ba5d
|
[
"MIT"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
SQS Message
A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS
Message are here:
http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
So, at its simplest level, a Message just needs to allow a developer to store bytes in it and get the bytes
back out. However, to allow messages to have richer semantics, the Message class must support the
following interfaces:
The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
boto Queue object and represents the queue that the message will be stored in. The default value for
this parameter is None.
The constructor for the Message class must accept a keyword parameter "body" which represents the
content or body of the message. The format of this parameter will depend on the behavior of the
particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the
user the body passed to the constructor should be a dict-like object that can be used to populate
the initial state of the message.
The Message class must provide an encode method that accepts a value of the same type as the body
parameter of the constructor and returns a string of characters that are able to be stored in an
SQS message body (see rules above).
The Message class must provide a decode method that accepts a string of characters that can be
stored (and probably were stored!) in an SQS message and return an object of a type that is consistent
with the "body" parameter accepted on the class constructor.
The Message class must provide a __len__ method that will return the size of the encoded message
that would be stored in SQS based on the current state of the Message object.
The Message class must provide a get_body method that will return the body of the message in the
same format accepted in the constructor of the class.
The Message class must provide a set_body method that accepts a message body in the same format
accepted by the constructor of the class. This method should alter the internal state of the
Message object to reflect the state represented in the message body parameter.
The Message class must provide a get_body_encoded method that returns the current body of the message
in the format in which it would be stored in SQS.
"""
import base64
import boto
from boto.compat import StringIO
from boto.compat import six
from boto.sqs.attributes import Attributes
from boto.sqs.messageattributes import MessageAttributes
from boto.exception import SQSDecodeError
class RawMessage(object):
"""
Base class for SQS messages. RawMessage does not encode the message
in any way. Whatever you store in the body of the message is what
will be written to SQS and whatever is returned from SQS is stored
directly into the body of the message.
"""
def __init__(self, queue=None, body=''):
self.queue = queue
self.set_body(body)
self.id = None
self.receipt_handle = None
self.md5 = None
self.attributes = Attributes(self)
self.message_attributes = MessageAttributes(self)
self.md5_message_attributes = None
def __len__(self):
return len(self.encode(self._body))
def startElement(self, name, attrs, connection):
if name == 'Attribute':
return self.attributes
if name == 'MessageAttribute':
return self.message_attributes
return None
def endElement(self, name, value, connection):
if name == 'Body':
self.set_body(value)
elif name == 'MessageId':
self.id = value
elif name == 'ReceiptHandle':
self.receipt_handle = value
elif name == 'MD5OfBody':
self.md5 = value
elif name == 'MD5OfMessageAttributes':
self.md5_message_attributes = value
else:
setattr(self, name, value)
def endNode(self, connection):
self.set_body(self.decode(self.get_body()))
def encode(self, value):
"""Transform body object into serialized byte array format."""
return value
def decode(self, value):
"""Transform seralized byte array into any object."""
return value
def set_body(self, body):
"""Override the current body for this object, using decoded format."""
self._body = body
def get_body(self):
return self._body
def get_body_encoded(self):
"""
This method is really a semi-private method used by the Queue.write
method when writing the contents of the message to SQS.
You probably shouldn't need to call this method in the normal course of events.
"""
return self.encode(self.get_body())
def delete(self):
if self.queue:
return self.queue.delete_message(self)
def change_visibility(self, visibility_timeout):
if self.queue:
self.queue.connection.change_message_visibility(self.queue,
self.receipt_handle,
visibility_timeout)
class Message(RawMessage):
"""
The default Message class used for SQS queues. This class automatically
encodes/decodes the message body using Base64 encoding to avoid any
illegal characters in the message body. See:
https://forums.aws.amazon.com/thread.jspa?threadID=13067
for details on why this is a good idea. The encode/decode is meant to
be transparent to the end-user.
"""
def encode(self, value):
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
return base64.b64encode(value).decode('utf-8')
def decode(self, value):
try:
value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
except:
boto.log.warning('Unable to decode message')
return value
return value
class MHMessage(Message):
"""
The MHMessage class provides a message that provides RFC821-like
headers like this:
HeaderName: HeaderValue
The encoding/decoding of this is handled automatically and after
the message body has been read, the message instance can be treated
like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
"""
def __init__(self, queue=None, body=None, xml_attrs=None):
if body is None or body == '':
body = {}
super(MHMessage, self).__init__(queue, body)
def decode(self, value):
try:
msg = {}
fp = StringIO(value)
line = fp.readline()
while line:
delim = line.find(':')
key = line[0:delim]
value = line[delim+1:].strip()
msg[key.strip()] = value.strip()
line = fp.readline()
except:
raise SQSDecodeError('Unable to decode message', self)
return msg
def encode(self, value):
s = ''
for item in value.items():
s = s + '%s: %s\n' % (item[0], item[1])
return s
def __contains__(self, key):
return key in self._body
def __getitem__(self, key):
if key in self._body:
return self._body[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self._body[key] = value
self.set_body(self._body)
def keys(self):
return self._body.keys()
def values(self):
return self._body.values()
def items(self):
return self._body.items()
def has_key(self, key):
return key in self._body
def update(self, d):
self._body.update(d)
self.set_body(self._body)
def get(self, key, default=None):
return self._body.get(key, default)
class EncodedMHMessage(MHMessage):
"""
The EncodedMHMessage class provides a message that provides RFC821-like
headers like this:
HeaderName: HeaderValue
This variation encodes/decodes the body of the message in base64 automatically.
The message instance can be treated like a mapping object,
i.e. m['HeaderName'] would return 'HeaderValue'.
"""
def decode(self, value):
try:
value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
except:
raise SQSDecodeError('Unable to decode message', self)
return super(EncodedMHMessage, self).decode(value)
def encode(self, value):
value = super(EncodedMHMessage, self).encode(value)
return base64.b64encode(value.encode('utf-8')).decode('utf-8')
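# Illustrative sketch only, not part of boto: the module docstring above spells out the
# encode/decode contract a Message subclass must honour. "JSONMessage" below is a
# hypothetical example of such a subclass, storing a dict body serialized as JSON.
import json

class JSONMessage(RawMessage):
    """Hypothetical example subclass: dict-like body, serialized as JSON in SQS."""

    def __init__(self, queue=None, body=None):
        if body is None:
            body = {}
        super(JSONMessage, self).__init__(queue, body)

    def encode(self, value):
        # Serialize the dict body into a JSON string safe to store in SQS.
        return json.dumps(value)

    def decode(self, value):
        # Parse the stored JSON back into a dict; mirror boto's error convention.
        try:
            return json.loads(value)
        except ValueError:
            raise SQSDecodeError('Unable to decode message', self)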
| 36.367647
| 116
| 0.675596
|
10856ffbb31c03d47dbb0fe294abb6586e373999
| 779
|
py
|
Python
|
teryt_tree/migrations/0007_auto_20191020_2005.py
|
rwakulszowa/django-teryt-tree
|
ef7562ac36626a9f6f5292da26eb9d63da4f1806
|
[
"BSD-3-Clause"
] | 3
|
2016-07-04T08:58:51.000Z
|
2019-06-23T14:58:41.000Z
|
teryt_tree/migrations/0007_auto_20191020_2005.py
|
rwakulszowa/django-teryt-tree
|
ef7562ac36626a9f6f5292da26eb9d63da4f1806
|
[
"BSD-3-Clause"
] | 5
|
2017-12-03T04:28:01.000Z
|
2020-01-07T00:36:28.000Z
|
teryt_tree/migrations/0007_auto_20191020_2005.py
|
watchdogpolska/django-teryt-tree
|
0f7bc7ea5d027d37b6929777e65a27286412209f
|
[
"BSD-3-Clause"
] | 4
|
2017-12-02T22:25:44.000Z
|
2020-10-31T20:18:57.000Z
|
# Generated by Django 2.2.6 on 2019-10-21 01:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("teryt_tree", "0006_auto_20191020_1953"),
]
operations = [
migrations.AlterField(
model_name="jednostkaadministracyjna",
name="level",
field=models.PositiveIntegerField(editable=False),
),
migrations.AlterField(
model_name="jednostkaadministracyjna",
name="lft",
field=models.PositiveIntegerField(editable=False),
),
migrations.AlterField(
model_name="jednostkaadministracyjna",
name="rght",
field=models.PositiveIntegerField(editable=False),
),
]
| 26.862069
| 62
| 0.608472
|
db1f525658526f575d640197cfa3e8af0adc1668
| 4,212
|
py
|
Python
|
open/Dell/code/rnnt/tensorrt/preprocessing/preprocessing.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 12
|
2021-09-23T08:05:57.000Z
|
2022-03-21T03:52:11.000Z
|
open/Dell/code/rnnt/tensorrt/preprocessing/preprocessing.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 24
|
2021-07-19T01:09:35.000Z
|
2022-03-17T11:44:02.000Z
|
open/Dell/code/rnnt/tensorrt/preprocessing/preprocessing.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 16
|
2021-09-23T20:26:38.000Z
|
2022-03-09T12:59:56.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import torch
import torch.nn as nn
from apex import amp
from helpers import Optimization
from parts.features import FeatureFactory
class SpecCutoutRegions(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, cfg):
super(SpecCutoutRegions, self).__init__()
self.cutout_rect_regions = cfg.get('cutout_rect_regions', 0)
self.cutout_rect_time = cfg.get('cutout_rect_time', 5)
self.cutout_rect_freq = cfg.get('cutout_rect_freq', 20)
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape).bool()
for idx in range(sh[0]):
for i in range(self.cutout_rect_regions):
cutout_rect_x = int(random.uniform(
0, sh[1] - self.cutout_rect_freq))
cutout_rect_y = int(random.uniform(
0, sh[2] - self.cutout_rect_time))
mask[idx, cutout_rect_x:cutout_rect_x + self.cutout_rect_freq,
cutout_rect_y:cutout_rect_y + self.cutout_rect_time] = 1
x = x.masked_fill(mask.to(device=x.device), 0)
return x
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, cfg):
super(SpecAugment, self).__init__()
self.cutout_x_regions = cfg.get('cutout_x_regions', 0)
self.cutout_y_regions = cfg.get('cutout_y_regions', 0)
self.cutout_x_width = cfg.get('cutout_x_width', 10)
self.cutout_y_width = cfg.get('cutout_y_width', 10)
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape).bool()
for idx in range(sh[0]):
for _ in range(self.cutout_x_regions):
cutout_x_left = int(random.uniform(0, sh[1] - self.cutout_x_width))
mask[idx, cutout_x_left:cutout_x_left + self.cutout_x_width, :] = 1
for _ in range(self.cutout_y_regions):
cutout_y_left = int(random.uniform(0, sh[2] - self.cutout_y_width))
mask[idx, :, cutout_y_left:cutout_y_left + self.cutout_y_width] = 1
x = x.masked_fill(mask.to(device=x.device), 0)
return x
class SpectrogramAugmentation(nn.Module):
"""Spectrogram augmentation
"""
def __init__(self, **kwargs):
nn.Module.__init__(self)
self.spec_cutout_regions = SpecCutoutRegions(kwargs)
self.spec_augment = SpecAugment(kwargs)
@torch.no_grad()
def forward(self, input_spec):
augmented_spec = self.spec_cutout_regions(input_spec)
augmented_spec = self.spec_augment(augmented_spec)
return augmented_spec
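    # Hedged usage sketch (shapes and kwargs below are illustrative, not library defaults):
    #   aug = SpectrogramAugmentation(cutout_rect_regions=2, cutout_rect_time=5, cutout_rect_freq=20,
    #                                 cutout_x_regions=1, cutout_y_regions=1,
    #                                 cutout_x_width=10, cutout_y_width=10)
    #   spec = torch.randn(8, 80, 200)   # (batch, freq bins, time frames)
    #   out = aug(spec)                  # same shape, with random rectangles/stripes zeroed out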
class AudioPreprocessing(nn.Module):
"""GPU accelerated audio preprocessing
"""
def __init__(self, **kwargs):
nn.Module.__init__(self) # For PyTorch API
self.optim_level = kwargs.get('optimization_level', Optimization.nothing)
self.featurizer = FeatureFactory.from_config(kwargs)
def forward(self, x):
input_signal, length = x
length.requires_grad_(False)
if self.optim_level not in [Optimization.nothing, Optimization.mxprO0, Optimization.mxprO3]:
with amp.disable_casts():
processed_signal = self.featurizer(x)
processed_length = self.featurizer.get_seq_len(length)
else:
processed_signal = self.featurizer(x)
processed_length = self.featurizer.get_seq_len(length)
return processed_signal, processed_length
| 33.428571
| 100
| 0.656695
|
b5e66e738e1cd0b6bead2f8dd3e536f022267eb0
| 4,099
|
py
|
Python
|
src/all_networks_with_n_nodes_e_edges.py
|
Eandreas1857/dsgrn_acdc
|
cfbccbd6cc27ffa4b0bd570ffb4f206b2ca9705c
|
[
"MIT"
] | null | null | null |
src/all_networks_with_n_nodes_e_edges.py
|
Eandreas1857/dsgrn_acdc
|
cfbccbd6cc27ffa4b0bd570ffb4f206b2ca9705c
|
[
"MIT"
] | null | null | null |
src/all_networks_with_n_nodes_e_edges.py
|
Eandreas1857/dsgrn_acdc
|
cfbccbd6cc27ffa4b0bd570ffb4f206b2ca9705c
|
[
"MIT"
] | null | null | null |
import itertools
from collections import defaultdict  # used by get_network_string and return_computable_net_w_limited_PG_size
import networkx as nx
import DSGRN
from DSGRN import *
# Function to generate all binary strings
def generateAllBinaryStrings(binary_list, n, arr, i):
if i == n:
a = []
for i in range(0, n):
a.append(arr[i])
binary_list.append(a)
return
# First assign "0" at ith position
# and try for all other permutations
# for remaining positions
arr[i] = 0
generateAllBinaryStrings(binary_list, n, arr, i + 1)
# And then assign "1" at ith position
# and try for all other permutations
# for remaining positions
arr[i] = 1
generateAllBinaryStrings(binary_list, n, arr, i + 1)
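# Hedged usage sketch: for n = 2 the recursion above fills binary_list in this order.
#   binary_list = []
#   generateAllBinaryStrings(binary_list, 2, [None] * 2, 0)
#   binary_list == [[0, 0], [0, 1], [1, 0], [1, 1]]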
def get_network_string(net):
"""
net: tuple whose first item is a set of edges and whose second item is a list of 0/1 values indicating whether the corresponding edge in the first item
is repressing (0) or activating (1).
Example: net = ((('Hb', 'Gt'),
('Hb', 'Kr'),
('Hb', 'Kni'),
('Gt', 'Hb'),
('Gt', 'Kr'),
('Gt', 'Kni'),
('Kr', 'Hb'),
('Kr', 'Gt')),
[0, 0, 1, 1, 0, 0, 1, 0])
then the edge from 'Hb' to 'Gt' is repressing while the 'Kr' to 'Hb' edge is activating.
returns: string for use with DSGRN network input.
"""
net_dict = {'Hb': [], 'Gt': [], 'Kr': [], 'Kni':[]}
for i in net[0]:
index = net[0].index(i)
net_dict[i[1]] += [(net[1][index], i[0])]
new = {}
for node in net_dict:
d = defaultdict(list)
act_str = ''
rep_str = ''
for k, *v in net_dict[node]:
d[k].append(v)
for edge_type in list(d.items()):
if edge_type[0] == 0:
rep_str = ''
for i in edge_type[1]:
rep_str += '(~' + i[0] + ')'
if edge_type[0] == 1:
act_str = '('
for i in edge_type[1]:
if edge_type[1].index(i) == 0:
act_str += i[0]
else:
act_str += '+' + i[0]
act_str += ')'
new[node] = act_str + rep_str
return '"""Hb : ' + new['Hb'] + '\n' + 'Gt : ' + new['Gt'] + '\n' + 'Kr : ' + new['Kr'] + '\n' + 'Kni : ' + new['Kni'] + '"""'
def get_all_networks(node_list, n):
"""
node_list: set of nodes wanted in the network.
n: number of edges wanted in the network.
returns: list of tuples. The first element in each tuple is a set of edges, the second is a binary list
where 0 means the edge is repressing and 1 means the edge is activating.
"""
binary_list = []
arr = [None]*n
generateAllBinaryStrings(binary_list, n, arr, 0)
edge_list = [(a, b) for a in node_list for b in node_list if a != b]
all_edge_comb = list(itertools.combinations(edge_list, n))  # choose n edges per network, matching the docstring (was hard-coded to 8)
all_network_comb = [(a, b) for a in all_edge_comb for b in binary_list]
return all_network_comb
def computable_networks(all_network_comb):
computable = []
for net in all_network_comb:
try:
string = get_network_string(net)
network = DSGRN.Network(string)
pg = ParameterGraph(network)
p = pg.size()
computable.append((all_network_comb.index(net), p))
except:
continue
return computable
def return_computable_net_w_limited_PG_size(computable, size_limit = 3240000):
d = defaultdict(list)
for k, v in computable:
d[v].append(k)
allowed = []
for i in sorted(list(d.items())):
if i[0] <= size_limit:
for j in i[1]:
allowed.append(j)
return allowed
def convert_edges_to_networkx(edges):
H = nx.DiGraph()
for edge in edges:
H.add_edge(edge[0], edge[1])
return H
def convert_dict_to_networkx(dict):
H = nx.DiGraph()
for s in dict:
for t in dict[s]:
H.add_edge(s,t)
return H
def save_networkx_as_png(G, filename):
g = nx.drawing.nx_pydot.to_pydot(G)
g.write_png(filename)
| 29.489209
| 130
| 0.529397
|
94b90c258fe8d2bc6bc06651278960c3c2707966
| 7,690
|
py
|
Python
|
pycatflow/input.py
|
Finn090/PyCatFlow
|
5e3c65c6f13c225c94bd3d3a805be867e0b0ff55
|
[
"MIT"
] | null | null | null |
pycatflow/input.py
|
Finn090/PyCatFlow
|
5e3c65c6f13c225c94bd3d3a805be867e0b0ff55
|
[
"MIT"
] | null | null | null |
pycatflow/input.py
|
Finn090/PyCatFlow
|
5e3c65c6f13c225c94bd3d3a805be867e0b0ff55
|
[
"MIT"
] | null | null | null |
def find_delimiter(data):
if type(data) == str:
headers = data.split("\n")[0]
else:
headers = data.decode("utf-8").split("\n")[0]
delimiters = [",", ";", "\t"] # Removed: , "\s", "|"
l = {}
for d in delimiters:
count = 0
for c in headers:
if c.find(d) != -1:
count += 1
l[d] = count
return [k for k, v in l.items() if v == max(l.values())][0]
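# Hedged usage sketch (the sample string is illustrative):
#   find_delimiter("col_a;col_b;col_c\n1;2;3")  ->  ";"
# because ";" occurs most often in the header line among the candidates ",", ";" and "\t".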
def detect_dtype(data, prefix):
t1 = []
t2 = []
for x in data:
x = x.replace(prefix, "")
try:
t1.append(int(x))
t2.append("int")
except ValueError:
try:
t1.append(float(x))
t2.append("float")
except ValueError:
from dateutil.parser import parse, ParserError
try:
t1.append(parse(x))
t2.append("date")
except ParserError:
t1.append(x)
t2.append("string")
continue
t = []
for k in set(t2):
[t.append(data[t1.index(h)]) for h in sorted([x for x, y in zip(t1, t2) if y == k]) if h not in t]
return t
def prepare_data(data, columns_data, node_data, category_data, orientation, sort_field, prefix):
new_data = {}
if orientation == 'horizontal':
if sort_field is None:
columns = detect_dtype(data[columns_data], prefix)
else:
columns = []
n_sort_field = [int(x) for x in data[sort_field]]
[columns.append(data[columns_data][n_sort_field.index(x)]) for x in sorted(n_sort_field) if x not in columns]
tags = data[node_data]
counts = [[x for x in tags].count(x) for x in tags]
if category_data is not None:
for l in columns:
d = {x: (z, y) for t, x, y, z in zip(data[columns_data], tags, data[category_data], counts) if l == t}
new_data[l] = {k: v for k, v in d.items()}
else:
for l in columns:
d = {x: z for t, x, z in zip(data[columns_data], tags, counts) if l == t}
new_data[l] = {k: v for k, v in d.items()}
else:
if category_data is not None:
columns = detect_dtype(list(data.keys()), prefix)
tags = []
for l in columns:
[tags.append(y) for y in data[l]]
counts = [[x for x in tags].count(x) for x in tags]
for l in columns:
data[l+"_count"] = [counts[tags.index(x)] for x in data[l]]
d = {x: (z, y) for x, y, z in zip(data[l], data[l + category_data], data[l + "_count"])}
new_data[l] = {k: v for k, v in d.items()}
else:
            columns = detect_dtype(list(data.keys()), prefix)
tags = []
for l in columns:
[tags.append(y) for y in data[l]]
counts = [[x for x in tags].count(x) for x in tags]
for l in columns:
data[l+"_count"] = [counts[tags.index(x)] for x in data[l]]
d = {x: z for x, z in zip(data[l], data[l+"_count"])}
new_data[l] = {k: v for k, v in d.items()}
return new_data
def read_file(filepath,
columns=None,
nodes=None,
categories=None,
column_order=None,
orientation="horizontal",
delimiter=None,
line_delimiter=None,
prefix=""):
"""
Loads data from file and returns parsed data.
Parameters:
filepath (str): Path to file
columns (str): Name of the column with temporal data (leave None if orientation="vertical");
nodes (str): Name of the column containing the node data;
categories (str): Name of the column containing optional categories of nodes;
column_order (str): Optionally provide the name of a column determining the order of the columns;
orientation (str): Horizontal if the temporal data are in one column, vertical if the temporal data are the names of the columns;
delimiter (str): Optionally specify the delimiter, if None it will try to autodetect;
line_delimiter (str): Optionally define the line separator, by default \n
Returns:
data (dict): Dictionary of loaded and parsed data.
"""
with open(filepath, "rb") as f:
data = f.read()
if delimiter is None:
delimiter = find_delimiter(data)
else:
delimiter = delimiter
if line_delimiter is None:
line_delimiter = "\n"
else:
line_delimiter = line_delimiter
headers = data.decode("utf-8-sig").split(line_delimiter)[0].split(delimiter)
lines = data.decode("utf-8-sig").split(line_delimiter)[1:]
lines = [line for line in lines if line != '']
data = {}
for h in headers:
data[h.replace('\r', '')] = [line.split(delimiter)[headers.index(h)].replace('\r', '') for line in lines]
data = prepare_data(data, columns, nodes, categories, orientation, column_order, prefix)
return data
def read(data,
columns=None,
nodes=None,
categories=None,
column_order=None,
orientation="horizontal",
delimiter=None,
line_delimiter=None,
prefix=""):
"""
Parses a string into structured data for visualization.
Parameters:
read(data, columns=None, nodes=None, categories=None, column_order=None, orientation="horizontal",
delimiter=None, line_delimiter=None)
data (str): String with records divided by line_delimiter and fields divided by delimiter; list of lists with the first element as list of headers; dictionary with headers as keys and values as lists;
columns (str): Name of the column with temporal data (leave None if orientation="vertical");
nodes (str): Name of the column containing the node data;
categories (str): Name of the column containing optional categories of nodes;
column_order (str): Optionally provide the name of a column determining the order of the columns;
orientation (str): Horizontal if the temporal data are in one column, vertical if the temporal data are the names of the columns;
delimiter (str): Optionally specify the delimiter, if None it will try to autodetect;
line_delimiter (str): Optionally define the line separator, by default \n
Returns:
data (dict): Dictionary of parsed data.
"""
if type(data) == str:
if delimiter is None:
delimiter = find_delimiter(data)
else:
delimiter = delimiter
if line_delimiter is None:
line_delimiter = "\n"
else:
line_delimiter = line_delimiter
headers = data.split(line_delimiter)[0].split(delimiter)
lines = data.split(line_delimiter)[1:]
data = {}
for h in headers:
data[h] = [line.split(delimiter)[headers.index(h)] for line in lines]
if type(data) == list:
headers = data[0]
lines = data[1:]
data = {}
for h in headers:
data[h.replace('\r', '')] = [line.split(delimiter)[headers.index(h)].replace('\r', '') for line in lines]
data = prepare_data(data, columns, nodes, categories, orientation, column_order, prefix)
return data
| 40.687831
| 205
| 0.561378
|
38697e08240c4ea80279317bf94d091da7f6c1b9
| 3,246
|
py
|
Python
|
weasyprint/tests/test_draw/test_list.py
|
OptikSage/WeasyPrint
|
e9ccfb1c7a62924cfe05cbee68cd722b4846704a
|
[
"BSD-3-Clause"
] | 12
|
2019-08-02T07:58:16.000Z
|
2022-01-31T23:45:08.000Z
|
weasyprint/tests/test_draw/test_list.py
|
OptikSage/WeasyPrint
|
e9ccfb1c7a62924cfe05cbee68cd722b4846704a
|
[
"BSD-3-Clause"
] | 8
|
2019-08-02T08:06:18.000Z
|
2022-03-11T23:45:17.000Z
|
weasyprint/tests/test_draw/test_list.py
|
OptikSage/WeasyPrint
|
e9ccfb1c7a62924cfe05cbee68cd722b4846704a
|
[
"BSD-3-Clause"
] | 11
|
2019-07-31T16:23:36.000Z
|
2022-01-29T08:30:07.000Z
|
"""
weasyprint.tests.test_draw.test_list
------------------------------------
Test how lists are drawn.
:copyright: Copyright 2011-2018 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from . import B, _, assert_pixels, r
from ..testing_utils import SANS_FONTS, assert_no_logs
@assert_no_logs
@pytest.mark.parametrize('position, pixels', (
('outside',
# ++++++++++++++ ++++ <li> horizontal margins: 7px 2px
# ###### <li> width: 12 - 7 - 2 = 3px
# -- list marker margin: 0.5em = 2px
# ******** list marker image is 4px wide
[
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + r + B + B + B + _ + _ + _ + _ + _ + _,
_ + _ + B + B + B + B + _ + _ + _ + _ + _ + _,
_ + _ + B + B + B + B + _ + _ + _ + _ + _ + _,
_ + _ + B + B + B + B + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
]),
('inside',
# ++++++++++++++ ++++ <li> horizontal margins: 7px 2px
# ###### <li> width: 12 - 7 - 2 = 3px
# ******** list marker image is 4px wide: overflow
[
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + r + B + B + B + _,
_ + _ + _ + _ + _ + _ + _ + B + B + B + B + _,
_ + _ + _ + _ + _ + _ + _ + B + B + B + B + _,
_ + _ + _ + _ + _ + _ + _ + B + B + B + B + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _ + _,
])
))
def test_list_style_image(position, pixels):
assert_pixels('list_style_image_' + position, 12, 10, pixels, '''
<style>
@page { size: 12px 10px }
body { margin: 0; background: white; font-family: %s }
ul { margin: 2px 2px 0 7px; list-style: url(pattern.png) %s;
font-size: 2px }
</style>
<ul><li></li></ul>''' % (SANS_FONTS, position))
@assert_no_logs
def test_list_style_image_none():
assert_pixels('list_style_none', 10, 10, [
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
_ + _ + _ + _ + _ + _ + _ + _ + _ + _,
], '''
<style>
@page { size: 10px }
body { margin: 0; background: white; font-family: %s }
ul { margin: 0 0 0 5px; list-style: none; font-size: 2px; }
</style>
<ul><li>''' % (SANS_FONTS,))
| 38.188235
| 78
| 0.369994
|
f0abbe2b7982d559f238153cc15b961825c581e2
| 3,376
|
py
|
Python
|
scripts/density_representation/optimize_sparse_params.py
|
felixmusil/ml_tools
|
8731bd5628edcf50d03ea7fc99c570f428a08f7b
|
[
"MIT"
] | 1
|
2020-03-10T09:13:45.000Z
|
2020-03-10T09:13:45.000Z
|
scripts/density_representation/optimize_sparse_params.py
|
felixmusil/ml_tools
|
8731bd5628edcf50d03ea7fc99c570f428a08f7b
|
[
"MIT"
] | null | null | null |
scripts/density_representation/optimize_sparse_params.py
|
felixmusil/ml_tools
|
8731bd5628edcf50d03ea7fc99c570f428a08f7b
|
[
"MIT"
] | null | null | null |
import ase
from ase.io import read,write
from ase.visualize import view
import sys,os
import mkl
from glob import glob
from copy import copy
from sklearn.model_selection import KFold,ParameterGrid
import cPickle as pck
import json
import pandas as pd
sys.path.insert(0,'/home/musil/git/ml_tools/')
from ml_tools.descriptors.quippy_interface import RawSoapQUIP
from ml_tools.models.KRR import KRR,TrainerCholesky,KRRFastCV
from ml_tools.models.pipelines import RegressorPipeline
from ml_tools.models.handlers import HashJsonHandler
from ml_tools.kernels.kernels import KernelPower,KernelSparseSoR,KernelSum
from ml_tools.utils import (get_mae,get_rmse,get_sup,get_spearman,
get_score,tqdm_cs,load_pck,dump_json,load_json,
load_data,dump_data)
from ml_tools.split import KFold,EnvironmentalKFold,LCSplit,ShuffleSplit,EnvironmentalShuffleSplit
from ml_tools.model_selection.scorer import CrossValidationScorer
from ml_tools.model_selection.gs import GridSearch
from ml_tools.base import KernelBase,BaseEstimator,TransformerMixin
from ml_tools.math_utils.optimized import power
from ml_tools.compressor.fps import FPSFilter
from ml_tools.compressor.filter import SymmetryFilter
from ml_tools.compressor.powerspectrum_cov import CompressorCovarianceUmat
from ml_tools.base import np,sp
mkl.set_num_threads(15)
path = '/home/musil/workspace/density_paper/'
xyzPath = path + 'data/'
fn_in = path + 'results/nmr/active_set/full_kernel_umat_sp_compression_0.json'
fn_prop = path + 'data/CSD890_H.npy'
fn_out = path + 'results/nmr/active_set/opt_cv_score_sparse.json'
y_train = np.load(fn_prop)
print fn_in
params, Kmat = load_data(fn_in,mmap_mode=None)
fps_ids = params['fps_ids']
soap_params = params['soap_params']
kernel_params = params['kernel_params']
env_mapping = params['env_mapping']
kernel = KernelPower(**kernel_params)
#trainer = TrainerCholesky(memory_efficient=False)
cv = EnvironmentalKFold(n_splits=10,random_state=10,shuffle=True,mapping=env_mapping)
jitter = 1e-8
trainer = TrainerCholesky(memory_efficient=False)
scores = []
deltas = [1,1e-1,1e-2,1e-3]
Lambdas = [2,1,0.7,0.5,0.1]
N_active_samples = [3000,5000,10000,15000,20000]
for delta in tqdm_cs(deltas,desc='delta'):
krr = KRR(jitter,delta,trainer)
for N_active_sample in tqdm_cs(N_active_samples,desc='N_active_sample',leave=False):
active_ids = fps_ids[:N_active_sample]
kMM = Kmat[np.ix_(active_ids,active_ids)]
for Lambda in tqdm_cs(Lambdas,desc='Lambda',leave=False):
preds = []
y_pred = np.zeros(y_train.shape)
for train,test in tqdm_cs(cv.split(Kmat),desc='cv',total=cv.n_splits,leave=False):
kMN = Kmat[np.ix_(active_ids,train)]
## assumes Lambda= Lambda**2*np.diag(np.ones(n))
sparseK = kMM + np.dot(kMN,kMN.T)/Lambda
sparseY = np.dot(kMN,y_train[train])/Lambda
Ktest = Kmat[np.ix_(test,active_ids)]
krr.fit(sparseK,sparseY)
y_pred[test] = krr.predict(Ktest)
sc = get_score(y_pred,y_train)
sc.update(dict(N_active_sample=N_active_sample,
delta=delta,Lambda=Lambda))
print sc
scores.append(sc)
df = pd.DataFrame(scores)
df.to_json(fn_out)
| 37.511111
| 98
| 0.724526
|
820729a6366ee5ee6c778d41e4c88274e4ed256a
| 543
|
py
|
Python
|
Ex_Files_Python_Automation/Exercise Files/Source Code/fileIO.py
|
smile-bold/Python-Projects
|
41773e2c4d3b28304aa7a0a8454e28e59b06c313
|
[
"MIT"
] | null | null | null |
Ex_Files_Python_Automation/Exercise Files/Source Code/fileIO.py
|
smile-bold/Python-Projects
|
41773e2c4d3b28304aa7a0a8454e28e59b06c313
|
[
"MIT"
] | null | null | null |
Ex_Files_Python_Automation/Exercise Files/Source Code/fileIO.py
|
smile-bold/Python-Projects
|
41773e2c4d3b28304aa7a0a8454e28e59b06c313
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
f = open('inputFile.txt', 'r') # opens file with only read permissions
passFile = open('PassFile.txt', 'w') #opens file with write permissions
failFile = open('FailFile.txt', 'w') # opens file with write permissions
for line in f: # loop over each line of the input file
line_split = line.split()
    if line_split[2] == 'P': # if the third whitespace-separated field is 'P'
passFile.write(line)
else:
failFile.write(line)
f.close()
passFile.close()
failFile.close()
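# Hedged alternative sketch of the same pass/fail split using context managers, so the
# files are closed automatically (file names reuse the ones above, nothing new assumed):
# with open('inputFile.txt', 'r') as f, \
#      open('PassFile.txt', 'w') as passFile, \
#      open('FailFile.txt', 'w') as failFile:
#     for line in f:
#         if line.split()[2] == 'P':
#             passFile.write(line)
#         else:
#             failFile.write(line)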
| 33.9375
| 92
| 0.6814
|
c48244655e913fa5ce80f29fce3c3fc85527a6d8
| 1,639
|
py
|
Python
|
lpips_loss.py
|
nelson870708/PerceptualSimilarity
|
c46cc10ea108403d2c42925e1417617912efd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
lpips_loss.py
|
nelson870708/PerceptualSimilarity
|
c46cc10ea108403d2c42925e1417617912efd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
lpips_loss.py
|
nelson870708/PerceptualSimilarity
|
c46cc10ea108403d2c42925e1417617912efd4d8
|
[
"BSD-2-Clause"
] | null | null | null |
import argparse
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import lpips
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--ref_path", type=str, default="./imgs/ex_ref.png")
parser.add_argument("--pred_path", type=str, default="./imgs/ex_p1.png")
parser.add_argument("--use_gpu", action="store_true", help="turn on flag to use GPU")
opt = parser.parse_args()
loss_fn = lpips.LPIPS(net="vgg")
if opt.use_gpu:
loss_fn.cuda()
ref = lpips.im2tensor(lpips.load_image(opt.ref_path))
pred = Variable(lpips.im2tensor(lpips.load_image(opt.pred_path)), requires_grad=True)
if opt.use_gpu:
with torch.no_grad():
ref = ref.cuda()
pred = pred.cuda()
optimizer = torch.optim.Adam([pred, ], lr=1e-3, betas=(0.9, 0.999))
plt.ion()
fig = plt.figure(1)
ax = fig.add_subplot(131)
ax.imshow(lpips.tensor2im(ref))
ax.set_title("target")
ax = fig.add_subplot(133)
ax.imshow(lpips.tensor2im(pred.data))
ax.set_title("initialization")
for i in range(1000):
dist = loss_fn.forward(pred, ref)
optimizer.zero_grad()
dist.backward()
optimizer.step()
pred.data = torch.clamp(pred.data, -1, 1)
if i % 10 == 0:
print("iter %d, dist %.3g" % (i, dist.view(-1).data.cpu().numpy()[0]))
pred.data = torch.clamp(pred.data, -1, 1)
pred_img = lpips.tensor2im(pred.data)
ax = fig.add_subplot(132)
ax.imshow(pred_img)
ax.set_title("iter %d, dist %.3f" % (i, dist.view(-1).data.cpu().numpy()[0]))
plt.pause(5e-2)
# plt.imsave('imgs_saved/%04d.jpg'%i,pred_img)
| 29.8
| 88
| 0.675412
|
8fef64ffa68d7c99523df4ecab4f30ee6df48c8f
| 2,139
|
py
|
Python
|
experiments/1dt_shallowwater/hyperparams.py
|
flabowski/POD-UQNN
|
1c81be432e69d24ae894828f42918fbc1fe54bc1
|
[
"MIT"
] | 15
|
2020-05-29T11:42:14.000Z
|
2022-03-20T03:53:44.000Z
|
experiments/1dt_shallowwater/hyperparams.py
|
flabowski/POD-UQNN
|
1c81be432e69d24ae894828f42918fbc1fe54bc1
|
[
"MIT"
] | null | null | null |
experiments/1dt_shallowwater/hyperparams.py
|
flabowski/POD-UQNN
|
1c81be432e69d24ae894828f42918fbc1fe54bc1
|
[
"MIT"
] | 11
|
2020-06-09T01:16:22.000Z
|
2021-04-27T08:53:02.000Z
|
"""Default hyperparameters for 1D time-dep Burgers Equation."""
import numpy as np
from poduqnn.varneuralnetwork import NORM_MEANSTD
from scipy.optimize import brentq
HP = {}
# Dimension of u(x, t, mu)
HP["n_v"] = 2
# Space
HP["n_x"] = 132
HP["x_min"] = 0.
HP["x_max"] = 100.
# Time
HP["n_t"] = 51
HP["t_min"] = 0.
HP["t_max"] = 5.
# Snapshots count
HP["n_s"] = 40
HP["n_s_tst"] = 3
# POD stopping param
HP["eps"] = 1e-3
HP["eps_init"] = 1e-3
HP["n_L"] = 0
HP["x_noise"] = 0.
# Train/val split
HP["train_val"] = (4/5, 1/5)
# Deep NN hidden layers topology
HP["h_layers"] = [256, 256, 256]
# Setting up TF SGD-based optimizer
HP["n_M"] = 5
HP["epochs"] = 50000
HP["lr"] = 0.001
HP["soft_0"] = 0.001
HP["adv_eps"] = 0.0001
HP["lambda"] = 0.0001
HP["norm"] = NORM_MEANSTD
# Frequency of the logger
HP["log_frequency"] = 500
# Shallow water params
HP["mu_min"] = [2.]
HP["mu_max"] = [20.]
def u(X, t, mu, h0=1.):
"""1D Shallow Water analytical solution."""
"""Adapted from https://github.com/python-hydro/pyro2/blob/master/analysis/dam_compare.py."""
x = X[0]
h1 = mu[0]
xmin = x.min()
xmax = x.max()
# optimization
def find_h2(h2):
return (h2/h1)**3 - 9*(h2/h1)**2*(h0/h1) + \
16*(h2/h1)**1.5*(h0/h1) - (h2/h1)*(h0/h1)*(h0/h1+8) + \
(h0/h1)**3
h2 = brentq(find_h2, min(h0, h1), max(h0, h1))
# calculate sound speeds
g = 9.81
c0 = np.sqrt(g*h0)
c1 = np.sqrt(g*h1)
c2 = np.sqrt(g*h2)
u2 = 2 * (c1 - c2)
# shock speed
xi = c0 * np.sqrt(1/8 * ((2*(c2/c0)**2 + 1)**2 - 1))
xctr = 0.5*(xmin + xmax)
h_exact = np.zeros_like(x)
u_exact = np.zeros_like(x)
# h0
idx = x >= xctr + xi*t
h_exact[idx] = h0
u_exact[idx] = 0
# h1
idx = x <= xctr - c1*t
h_exact[idx] = h1
u_exact[idx] = 0
# h2
idx = ((x >= xctr + (u2-c2)*t) & (x < xctr + xi*t))
h_exact[idx] = h2
u_exact[idx] = u2
# h3
idx = ((x >= xctr - c1*t) & (x < xctr + (u2-c2)*t))
c3 = 1/3 * (2*c1 - (x-xctr)/t)
h_exact[idx] = c3[idx]**2 / g
u_exact[idx] = 2 * (c1-c3[idx])
return np.vstack((h_exact, u_exact))
| 22.755319
| 97
| 0.546517
|
aa1f0a3a061d26650a44920c7b76dc53d2090240
| 3,486
|
py
|
Python
|
gita_bot/ses_mailer.py
|
yashlab/node_projs
|
9bce23ea45d8caf3e2e97e33a494caa56e85a4b7
|
[
"MIT"
] | null | null | null |
gita_bot/ses_mailer.py
|
yashlab/node_projs
|
9bce23ea45d8caf3e2e97e33a494caa56e85a4b7
|
[
"MIT"
] | 1
|
2021-04-11T19:35:10.000Z
|
2021-04-11T19:35:10.000Z
|
gita_bot/ses_mailer.py
|
yashlab/node_projs
|
9bce23ea45d8caf3e2e97e33a494caa56e85a4b7
|
[
"MIT"
] | null | null | null |
import smtplib
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.audio import MIMEAudio  # used below for audio attachments; import was missing
from email.mime.base import MIMEBase    # used below for generic attachments; import was missing
import mimetypes,os,base64,random
from aws_ses_creds import *
'''
This file will contain the following:
* SENDER : email id of the sender (user@example.com) { Make sure the domain is verified with DKIM settings.}
* SENDERNAME : name that will appear to the receiver
* REPLY-EMAIL : mailbox that will monitor your replies
* USERNAME_SMTP : smtp username from amazon SES
* PASSWORD_SMTP : smtp password from amazon SES
* HOST : the SMTP endpoint for your AWS region (eg: email-smtp.ap-south-1.amazonaws.com)
* PORT : the port to be used
'''
def ses_emailer(RECIPIENT,SUBJECT,BODY_TEXT,HTML_TEXT,files):
'''
RECIPIENT : a python list object of proposed recipients (if SES is in sandbox, all emails must be verified.)
SUBJECT: subject of the email.
BODY_TEXT: The text of the body
HTML_TEXT: The html content to be embedded. (It somehow overwrites the body_text if put in.)
files: a python list of files to be attached.
'''
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = SUBJECT
msg['From'] = email.utils.formataddr((SENDERNAME, SENDER))
msg['Bcc'] = RECIPIENT
msg['reply-to'] = REPLY_EMAIL
part1 = MIMEText(BODY_TEXT, 'plain')
msg.attach(part1)
# attachment = './android-chrome-192x192.png'
# htm_cnt = (HTML_TEXT[:HTML_TEXT.find('src')] + '''src="cid:{}" '''.format(attachment) + HTML_TEXT[HTML_TEXT.find('alt'):])
msgText = MIMEText(HTML_TEXT, 'html')
msg.attach(msgText)
# fp = open(attachment, 'rb')
# img = MIMEImage(fp.read())
# fp.close()
# img.add_header('Content-ID', '<{}>'.format(attachment))
# msg.attach(img)
for file in files:
content_type, encoding = mimetypes.guess_type(file)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(file, 'rb')
message = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(file, 'rb')
message = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(file, 'rb')
message = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
else:
fp = open(file, 'rb')
message = MIMEBase(main_type, sub_type)
message.set_payload(fp.read())
fp.close()
filename = os.path.basename(file)
message.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(message)
# Try to send the message.
try:
server = smtplib.SMTP(HOST, PORT)
server.ehlo()
server.starttls()
#smtplib docs recommend calling ehlo() before & after starttls()
server.ehlo()
server.login(USERNAME_SMTP, PASSWORD_SMTP)
server.sendmail(SENDER, RECIPIENT, msg.as_string())
server.close()
# Display an error message if something goes wrong.
except Exception as e:
print ("Error: ", e)
else:
print ("Email sent!")
| 37.483871
| 128
| 0.623924
|
c1ad478fb3808f0699dde3af237924ceb1095f9e
| 427
|
py
|
Python
|
app.py
|
clash402/quizzer-2
|
183afc89bb911052c2e99dfeef2433901e5abe13
|
[
"MIT"
] | null | null | null |
app.py
|
clash402/quizzer-2
|
183afc89bb911052c2e99dfeef2433901e5abe13
|
[
"MIT"
] | null | null | null |
app.py
|
clash402/quizzer-2
|
183afc89bb911052c2e99dfeef2433901e5abe13
|
[
"MIT"
] | null | null | null |
from question import Question
from question_bank import QuestionBank
from data_manager import DataManager
from quiz import Quiz
from ui import UI
class App:
def __init__(self):
question_bank = QuestionBank(DataManager(), Question).generate()
quiz = Quiz(question_bank)
self.ui = UI(quiz)
# PUBLIC METHODS
def start(self):
self.ui.go_to_next_question()
self.ui.mainloop()
| 22.473684
| 72
| 0.697892
|
a4831a05e1c75ec00cbbc19e3a60b6a6e9ab9e1e
| 7,132
|
py
|
Python
|
joblib/externals/loky/backend/popen_loky_posix.py
|
cclauss/joblib
|
902fb6bbcf75c461d1b6703e5a01605fc592f214
|
[
"BSD-3-Clause"
] | 1
|
2019-07-16T10:25:24.000Z
|
2019-07-16T10:25:24.000Z
|
joblib/externals/loky/backend/popen_loky_posix.py
|
cclauss/joblib
|
902fb6bbcf75c461d1b6703e5a01605fc592f214
|
[
"BSD-3-Clause"
] | 1
|
2020-02-17T00:13:09.000Z
|
2020-02-17T00:13:09.000Z
|
joblib/externals/loky/backend/popen_loky_posix.py
|
jdanbrown/joblib
|
e205833ed42f0f1c72d69d96f4c266734cea9d95
|
[
"BSD-3-Clause"
] | 1
|
2019-03-25T09:56:23.000Z
|
2019-03-25T09:56:23.000Z
|
###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO
from . import reduction, spawn
from .context import get_spawning_popen, set_spawning_popen
from multiprocessing import util, process
if sys.version_info[:2] < (3, 3):
ProcessLookupError = OSError
if sys.platform != "win32":
from . import semaphore_tracker
__all__ = []
if sys.platform != "win32":
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
def __init__(self, fd):
self.fd = reduction._mk_inheritable(fd)
def detach(self):
return self.fd
#
# Start child process using subprocess.Popen
#
__all__.append('Popen')
class Popen(object):
method = 'loky'
DupFd = _DupFd
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._fds = []
self._launch(process_obj)
if sys.version_info < (3, 4):
@classmethod
def duplicate_for_child(cls, fd):
popen = get_spawning_popen()
popen._fds.append(fd)
return reduction._mk_inheritable(fd)
else:
def duplicate_for_child(self, fd):
self._fds.append(fd)
return reduction._mk_inheritable(fd)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError as e:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if sys.version_info < (3, 3):
import time
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
if self.returncode is None:
if timeout is not None:
from multiprocessing.connection import wait
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except ProcessLookupError:
pass
except OSError:
if self.wait(timeout=0.1) is None:
raise
def _launch(self, process_obj):
tracker_fd = semaphore_tracker._semaphore_tracker.getfd()
fp = BytesIO()
set_spawning_popen(self)
try:
prep_data = spawn.get_preparation_data(
process_obj._name, process_obj.init_main_module)
reduction.dump(prep_data, fp)
reduction.dump(process_obj, fp)
finally:
set_spawning_popen(None)
try:
parent_r, child_w = os.pipe()
child_r, parent_w = os.pipe()
# for fd in self._fds:
# _mk_inheritable(fd)
cmd_python = [sys.executable]
cmd_python += ['-m', self.__module__]
cmd_python += ['--name-process', str(process_obj.name)]
cmd_python += ['--pipe',
str(reduction._mk_inheritable(child_r))]
reduction._mk_inheritable(child_w)
if tracker_fd is not None:
cmd_python += ['--semaphore',
str(reduction._mk_inheritable(tracker_fd))]
self._fds.extend([child_r, child_w, tracker_fd])
util.debug("launch python with cmd:\n%s" % cmd_python)
from .fork_exec import fork_exec
pid = fork_exec(cmd_python, self._fds)
self.sentinel = parent_r
method = 'getbuffer'
if not hasattr(fp, method):
method = 'getvalue'
with os.fdopen(parent_w, 'wb') as f:
f.write(getattr(fp, method)())
self.pid = pid
finally:
if parent_r is not None:
util.Finalize(self, os.close, (parent_r,))
for fd in (child_r, child_w):
if fd is not None:
os.close(fd)
@staticmethod
def thread_is_spawning():
return True
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Command line parser')
parser.add_argument('--pipe', type=int, required=True,
help='File handle for the pipe')
parser.add_argument('--semaphore', type=int, required=True,
help='File handle name for the semaphore tracker')
parser.add_argument('--name-process', type=str, default=None,
help='Identifier for debugging purpose')
args = parser.parse_args()
info = dict()
semaphore_tracker._semaphore_tracker._fd = args.semaphore
exitcode = 1
try:
with os.fdopen(args.pipe, 'rb') as from_parent:
process.current_process()._inheriting = True
try:
prep_data = pickle.load(from_parent)
spawn.prepare(prep_data)
process_obj = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
exitcode = process_obj._bootstrap()
except Exception as e:
print('\n\n' + '-' * 80)
print('{} failed with traceback: '.format(args.name_process))
print('-' * 80)
import traceback
print(traceback.format_exc())
print('\n' + '-' * 80)
finally:
if from_parent is not None:
from_parent.close()
sys.exit(exitcode)
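# --- Editor's note: illustrative sketch, not part of the original module. ---
# _launch() and the __main__ block above implement a pickle-over-pipe
# handshake: the parent serialises the preparation data and the process
# object into a pipe whose read end the child inherits, and the child
# unpickles both before calling _bootstrap(). The standalone sketch below
# reproduces only that handshake with plain os.fork() and pickle; fork_exec,
# the semaphore tracker and the loky reducers are deliberately omitted, so
# treat it as an assumption-laden illustration, not the library mechanism.
def _sketch_pickle_handshake(payload):
    import os
    import pickle
    import sys
    read_fd, write_fd = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: read and unpickle whatever the parent sends, then exit.
        os.close(write_fd)
        with os.fdopen(read_fd, 'rb') as from_parent:
            received = pickle.load(from_parent)
        sys.exit(0 if received == payload else 1)
    # Parent: pickle the payload into the pipe and wait for the child.
    os.close(read_fd)
    with os.fdopen(write_fd, 'wb') as to_child:
        pickle.dump(payload, to_child)
    _, status = os.waitpid(pid, 0)
    return os.WEXITSTATUS(status) == 0
# On a POSIX system _sketch_pickle_handshake({'name': 'demo'}) should return True.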
| 33.172093
| 79
| 0.502243
|
b7ca445c5b63016b656d45482d196fd1f7c6523b
| 314
|
py
|
Python
|
contracts/enums.py
|
City-of-Helsinki/berth-reservations
|
a3b1a8c2176f132505527acdf6da3a62199401db
|
[
"MIT"
] | 3
|
2020-10-13T07:58:48.000Z
|
2020-12-22T09:41:50.000Z
|
contracts/enums.py
|
City-of-Helsinki/berth-reservations
|
a3b1a8c2176f132505527acdf6da3a62199401db
|
[
"MIT"
] | 422
|
2018-10-25T10:57:05.000Z
|
2022-03-30T05:47:14.000Z
|
contracts/enums.py
|
City-of-Helsinki/berth-reservations
|
a3b1a8c2176f132505527acdf6da3a62199401db
|
[
"MIT"
] | 1
|
2020-04-03T07:38:03.000Z
|
2020-04-03T07:38:03.000Z
|
from django.db.models import TextChoices
from django.utils.translation import gettext_lazy as _
class ContractStatus(TextChoices):
NEW = "new", _("New")
PENDING = "pending", _("Pending")
SIGNED = "signed", _("Signed")
DELETED = "deleted", _("Deleted")
CANCELLED = "cancelled", _("Cancelled")
| 28.545455
| 54
| 0.678344
|
ad8451beb5399539f276cbaaaf7a1dab0cc3c741
| 2,483
|
py
|
Python
|
signing/test/test_signedprotocol.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
signing/test/test_signedprotocol.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
signing/test/test_signedprotocol.py
|
nkrowlan/signing-server
|
53f9b8ffef493526c467d59b93fc71a6644a7b6a
|
[
"Apache-2.0"
] | null | null | null |
from twisted.internet import defer
from twisted.test import proto_helpers
from twisted.trial import unittest
from signing.processor import Processor
from signing.signedprotocol import SignedProtocol, SignedProtocolFactory, InvalidSignature
class SignedProtocolTestCase(unittest.TestCase):
"""
Trivial command implementation for test.
"""
class DummyProcessor(object):
def process(self, command, args):
d = defer.Deferred()
d.callback('result')
return d
"""
To avoid testing signing in this file, just append a letter as the signature.
"""
class ClearSigner(object):
def __init__(self, key):
pass
def signWithKey(self, key, data):
return data + 'a'
def sign(self, data):
return data + 'a'
"""
To avoid testing validation in this file, use a toggleable validation implementation.
To remove signature, just remove the last letter.
"""
class ToggleableValidation(object):
valid = False
signed_data = ''
def validates(self, key, signed_data):
return self.valid
def removeSignature(self, key, signed_data):
return signed_data[:-1]
def setUp(self):
self.processor = self.DummyProcessor()
self.signer = self.ClearSigner('signing_key')
self.validator = self.ToggleableValidation()
factory = SignedProtocolFactory(self.processor, self.signer, self.validator)
self.proto = factory.buildProtocol()
self.tr = proto_helpers.StringTransportWithDisconnection()
self.tr.protocol = self.proto
self.proto.makeConnection(self.tr)
def test_sign_failure_response(self):
self.proto.clientkey = 'key1'
self.validator.valid = False
self.proto.lineReceived('this will fail to validate, according to validator')
self.assertEquals(self.tr.value().strip(), self.signer.sign('invalid'))
def test_sign_success_response(self):
self.proto.clientkey = 'key1'
self.validator.valid = True
self.proto.lineReceived('this will validate, so we should not see result from processor')
self.assertEquals(self.validator.removeSignature(None, (self.tr.value().strip())), 'result')
def test_set_key(self):
self.proto.clientkey = None
key = 'key1'
self.proto.lineReceived(key)
self.assertEqual(self.proto.clientkey, 'key1')
| 31.833333
| 100
| 0.660491
|
b868aa43973c7b3c028d3471b8b163aeeb5ea49a
| 363
|
py
|
Python
|
examples/get_search.py
|
ToucanTocard/crawliexpress
|
1506bdcd4d02d4f7ac9fc9829deff2f737bea211
|
[
"MIT"
] | null | null | null |
examples/get_search.py
|
ToucanTocard/crawliexpress
|
1506bdcd4d02d4f7ac9fc9829deff2f737bea211
|
[
"MIT"
] | 2
|
2020-12-23T00:59:14.000Z
|
2021-07-22T15:47:48.000Z
|
examples/get_search.py
|
ToucanTocard/crawliexpress
|
1506bdcd4d02d4f7ac9fc9829deff2f737bea211
|
[
"MIT"
] | null | null | null |
from crawliexpress import Client
from time import sleep
client = Client(
"https://www.aliexpress.com",
# copy it from your browser cookies
"xxxx",
)
page = 1
while True:
search_page = client.get_search("akame ga kill", page=page)
print(search_page.page)
if search_page.has_next_page() is False:
break
page += 1
sleep(1)
| 19.105263
| 63
| 0.666667
|
93e43af349b642b611b0b5f14dcc6b912a3ffb3e
| 1,112
|
py
|
Python
|
tests/test_estimator.py
|
iglpdc/dmrg_helpers
|
df27ff06395c0a4779c2d2723d57524da55cc14a
|
[
"MIT"
] | 1
|
2019-09-26T13:06:04.000Z
|
2019-09-26T13:06:04.000Z
|
tests/test_estimator.py
|
iglpdc/dmrg_helpers
|
df27ff06395c0a4779c2d2723d57524da55cc14a
|
[
"MIT"
] | null | null | null |
tests/test_estimator.py
|
iglpdc/dmrg_helpers
|
df27ff06395c0a4779c2d2723d57524da55cc14a
|
[
"MIT"
] | null | null | null |
'''
Test for the database class.
'''
import os
from nose.tools import with_setup
from dmrg_helpers.extract.database import Database
def setup_function():
pass
def teardown_function():
os.remove('tests/db_test.sqlite3')
@with_setup(setup_function, teardown_function)
def test_members():
db = Database('tests/db_test.sqlite3')
db.insert_data_from_file('tests/file_one.dat')
n_up = db.get_estimator('n_up')
assert n_up.name == 'n_up'
meta_keys = n_up.meta_keys
assert 'parameter_1' in meta_keys
assert 'parameter_2' in meta_keys
assert len(n_up) == 1
assert '1.0:a_string' in n_up.data.keys()
@with_setup(setup_function, teardown_function)
def test_save():
db = Database('tests/db_test.sqlite3')
db.insert_data_from_file('tests/file_one.dat')
n_up = db.get_estimator('n_up')
n_up.save_as_txt('n_up', 'tests/')
contents = '0 1.0\n1 2.0'
with open('tests/n_up_parameter_1_1.0_parameter_2_a_string.dat', 'r') as f:
from_file = f.read()
assert from_file == contents
os.remove('tests/n_up_parameter_1_1.0_parameter_2_a_string.dat')
| 30.054054
| 79
| 0.715827
|
86aab7ceb9b8ba6be1e8d9aa4d80c731229988f5
| 3,159
|
py
|
Python
|
pypop/simulation/wold.py
|
trouleau/pypop
|
a9b22ad02e890e250fb91a8dc32a9dfde3d4e3de
|
[
"MIT"
] | null | null | null |
pypop/simulation/wold.py
|
trouleau/pypop
|
a9b22ad02e890e250fb91a8dc32a9dfde3d4e3de
|
[
"MIT"
] | null | null | null |
pypop/simulation/wold.py
|
trouleau/pypop
|
a9b22ad02e890e250fb91a8dc32a9dfde3d4e3de
|
[
"MIT"
] | null | null | null |
import random as rd
import numpy as np
import numba
#@numba.njit
def _total_intensity(mu, adj, beta, delta, t):
return mu + np.sum(adj / (beta + 1 + delta), axis=0)
#@numba.njit
def _simulate(mu, adj, beta, last, delta, start_t, start_n, max_t, max_n, seed=None):
dim = len(mu)
# FIXME: Add fake 0.0 events to avoid numba complaining of unknown type
events = [[0.0] for i in range(dim)]
if seed:
rd.seed(seed)
# Init time
t = float(start_t)
max_time = t + max_t
# Init number of jumps
n_jumps = int(start_n)
max_jumps = n_jumps + max_n
while (t < max_time) and (n_jumps < max_jumps):
# Compute intensity at each node
lambdas_t = _total_intensity(mu, adj, beta, delta, t)
# Compute total intensity
sum_lambdas_t = lambdas_t.cumsum()
# Sample next event time
dt = rd.expovariate(sum_lambdas_t[-1])
# Increase current time
t = float(t + dt)
n_jumps += 1
if t > max_time:
break
# Sample next event dimension
u = rd.random() * sum_lambdas_t[-1]
i = np.searchsorted(sum_lambdas_t, u)
# Add event to the history
events[i].append(t)
# Update cache for intensity computation
delta[:, i] = t - last
last[i] = t
    # FIXME: Remove fake 0.0 events
events = [ev[1:] for ev in events]
return events, last, delta, t, n_jumps
class MultivariateWoldSimulator(object):
def __init__(self, mu_a, alpha_ba, beta_ba):
self.mu_a = np.asanyarray(mu_a)
assert len(self.mu_a.shape) == 1
self.dim = len(self.mu_a)
self.alpha_ba = np.asanyarray(alpha_ba)
assert self.alpha_ba.shape == (self.dim, self.dim)
self.beta_ba = np.asanyarray(beta_ba)
assert self.beta_ba.shape == (self.dim, self.dim)
        self.last = 0.0 * np.ones(self.dim)  # Time of previous event in each dim
        self.delta = 0.0 * np.ones((self.dim, self.dim))  # Last inter-event time for each pair of dims
self.events = [[] for i in range(self.dim)] # List of events
self.t = 0.0
self.n_jumps = 0
def simulate(self, *, max_time=np.inf, max_jumps=np.inf, seed=None):
if seed is None:
seed = rd.randint(0, 2 ** 32 - 1)
if not ((max_time < np.inf) ^ (max_jumps < np.inf)):
raise ValueError('Either `max_time` or `max_jumps` must be set, but not both.')
new_events, last, delta, new_time, new_jumps = _simulate(
mu=self.mu_a, adj=self.alpha_ba, beta=self.beta_ba,
last=self.last, delta=self.delta, start_t=self.t,
start_n=self.n_jumps, max_t=max_time, max_n=max_jumps, seed=seed)
self.last = last.copy()
self.delta = delta.copy()
for i in range(self.dim):
self.events[i].extend(new_events[i])
self.t = new_time
self.n_jumps = new_jumps
return list(map(np.array, self.events))
@property
def end_time(self):
return self.t
def spectral_radius(self):
eigs = np.linalg.eigvals(self.alpha_ba / (self.beta_ba + 1))
return eigs.max() - eigs.min()
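# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# Minimal way to drive the simulator defined above: a 2-dimensional process
# with weak excitation, run for a fixed number of jumps. The parameter values
# below are arbitrary assumptions chosen only to keep the process stable.
if __name__ == '__main__':
    mu = np.array([0.5, 0.5])                      # baseline intensities
    alpha = np.array([[0.1, 0.2], [0.2, 0.1]])     # excitation weights
    beta = np.ones((2, 2))                         # decay offsets
    sim = MultivariateWoldSimulator(mu, alpha, beta)
    events = sim.simulate(max_jumps=100, seed=42)
    print([len(ev) for ev in events], sim.end_time)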
| 34.714286
| 91
| 0.600823
|
050b387ed1a1dfa36fa27847f66d96f0e99ed7f3
| 5,395
|
py
|
Python
|
main.py
|
VedderPradhan/redditanalyst
|
be3efcab602b82b33aa9356fd2ba7672cf91e271
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
VedderPradhan/redditanalyst
|
be3efcab602b82b33aa9356fd2ba7672cf91e271
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
VedderPradhan/redditanalyst
|
be3efcab602b82b33aa9356fd2ba7672cf91e271
|
[
"BSD-3-Clause"
] | null | null | null |
# import the necessary libraries
import praw
import beepy as beep
import time
nagreedacounts = 50 # number of accounts to check
naccounts = nagreedacounts # working count, increased when repeated accounts appear on the subreddit
fixedaccounts = nagreedacounts #fixed number of accounts
print("\nMain program started...")
subreddit = str(input("Type the subreddit you want to check (without the r/): ")) #input of the desired subreddit
start_time = time.time() #starts the timer that returns the amount of time it takes to run
reddit = praw.Reddit(
client_id="X", #Client ID of the Reddit bot
client_secret="X", #Secret Client ID
user_agent="testscript by u/Raymon22", #brief description
username = "X", #Username (to avoid some 403 errors)
password= "X" #password (to avoid some 403 errors)
)
postcounter = 0 #counts the posts that the user has in r/depression
accountschecked = 0 #number of accounts checked
users = [] #list of users checked
file = open(f"{subreddit}log.txt", "w+") #Creates the file with the name of the subreddit that is desired + log.txt
file.write("\nLog: ")
file.write("\n")
#while the users checked is not the same as the fixed number of accounts
while len(users) != fixedaccounts:
#for every hot submission in the desired subreddit (limited by naccounts)
for submission in reddit.subreddit(subreddit).hot(limit=naccounts):
        redditorstartime = time.time() # start a timer to measure how long each user takes
redditor = submission.author #gets the author of the post
#filters the name of the author
name = str(redditor)
name = name.replace("Redditor(name='", "")
name = name.replace("')", "")
if name in users:
naccounts += 1
pass #if the name is in the list, it ignores the name
else:
users.append(name) #adds the new name to the array
accountschecked += 1 #adds one in the number of checked accounts
            # get the IDs of the user's posts; limit is the number of posts, None for all posts
submisions = list(redditor.submissions.new(limit=None))
#print(submisions)
idstring = str(submisions) # transform to string
            # this fragment of code "cleans" the string, leaving only the ids
            idstring = idstring.lower() # lowercase the whole string
idstring = idstring.replace("submission", "")
idstring = idstring.replace("(", "")
idstring = idstring.replace(")", "")
idstring = idstring.replace("id=", "")
idstring = idstring.replace('\'', "")
idstring = idstring.replace('[', "")
idstring = idstring.replace(']', "")
idstring = idstring.replace(" ", "")
#print(idstring)
array_id = idstring.split(",") #splits the string and stores that string into an array/list
#print(array_id) #shows the array/list
#if any of the ids are missing, give a specified id (not in r/depression)
for id in array_id:
if id == " " or id == "":
id = "dy6cvb"
post = reddit.submission(id=id) #get the post information
subredditname = post.subreddit_name_prefixed #get the subredditname
                # if the word "depression" is in the subreddit name
if "depression" in subredditname:
postcounter += 1 #add one in the counter
#print(str(post.title) + " ---- " + str(subredditname)) #shows the title + the subreddit (always r/depression)
else:
pass #add something here if wanted
timeaccount = (time.time() - redditorstartime) #stop the user timer
            # print some info to the console and the txt log: accounts checked, time taken per account, how many posts the user has, and the reddit username
print(f"Accounts Checked: {accountschecked}" + " in --- %s seconds ---" % timeaccount + f" ({len(array_id)} posts)" + " --- " + f"r/{redditor}")
file.write(f"\n - Accounts Checked: {accountschecked}" + " in --- %s seconds ---" % timeaccount + f" ({len(array_id)} posts)" + " --- " + f"r/{redditor}")
#emergency break
if accountschecked == nagreedacounts:
break
timeprogram = (time.time() - start_time) #stop the main timer
print("\n-------------------------------------------------------------------------------")
print(f"\n - Posts in r/depression by the redditors of the {subreddit} subreddit : {postcounter}")
print("\n --- %s seconds to execute the program---" % timeprogram)
#TXT write
file.write(f"\n-------------------------------------------------------------------------------")
file.write(f"\n - Posts in r/depression by the redditors of the {subreddit} subreddit : {postcounter}")
file.write(f"\n --- %s seconds to execute the program---" % timeprogram)
file.write(f"\nCoded by RamonG")
file.close()
beep.beep(4)
print("\nCoded by RaymonDev")
print("\n----DeveloperInfo----")
print("\n -Array users: " + str(users))
print("\n -Array size: " + str(len(users)))
enter = input("\nPress enter to finish...")
| 42.480315
| 177
| 0.596849
|
5a8c2738e2bfd60e1da9205eb98b3c9d086b7e32
| 2,524
|
py
|
Python
|
a4/utils.py
|
MG2033/CS224N
|
ad8cd76c925ccc03ad813bc11d1c05d9614c7948
|
[
"MIT"
] | null | null | null |
a4/utils.py
|
MG2033/CS224N
|
ad8cd76c925ccc03ad813bc11d1c05d9614c7948
|
[
"MIT"
] | null | null | null |
a4/utils.py
|
MG2033/CS224N
|
ad8cd76c925ccc03ad813bc11d1c05d9614c7948
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 4
nmt.py: NMT Model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
"""
import math
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def pad_sents(sents, pad_token):
""" Pad list of sentences according to the longest sentence in the batch.
@param sents (list[list[str]]): list of sentences, where each sentence
is represented as a list of words
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of sentences where sentences shorter
than the max length sentence are padded out with the pad_token, such that
        each sentence in the batch now has equal length.
"""
sents_padded = []
### YOUR CODE HERE (~6 Lines)
max_sent_length = np.max([len(sent) for sent in sents])
for sent in sents:
sents_padded.append(sent + [pad_token for _ in range(max_sent_length - len(sent))])
### END YOUR CODE
return sents_padded
def read_corpus(file_path, source):
""" Read file, where each sentence is dilineated by a `\n`.
@param file_path (str): path to file containing corpus
@param source (str): "tgt" or "src" indicating whether text
is of the source language or target language
"""
data = []
for line in open(file_path):
sent = line.strip().split(' ')
# only append <s> and </s> to the target sentence
if source == 'tgt':
sent = ['<s>'] + sent + ['</s>']
data.append(sent)
return data
def batch_iter(data, batch_size, shuffle=False):
""" Yield batches of source and target sentences reverse sorted by length (largest to smallest).
@param data (list of (src_sent, tgt_sent)): list of tuples containing source and target sentence
@param batch_size (int): batch size
@param shuffle (boolean): whether to randomly shuffle the dataset
"""
batch_num = math.ceil(len(data) / batch_size)
index_array = list(range(len(data)))
if shuffle:
np.random.shuffle(index_array)
for i in range(batch_num):
indices = index_array[i * batch_size: (i + 1) * batch_size]
examples = [data[idx] for idx in indices]
examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
src_sents = [e[0] for e in examples]
tgt_sents = [e[1] for e in examples]
yield src_sents, tgt_sents
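# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# pad_sents() pads every sentence up to the longest one in the batch, so both
# sentences below come back with length 3. The tokens are arbitrary
# assumptions used only for illustration.
if __name__ == '__main__':
    batch = [['I', 'love', 'NLP'], ['Hi']]
    padded = pad_sents(batch, '<pad>')
    assert padded == [['I', 'love', 'NLP'], ['Hi', '<pad>', '<pad>']]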
| 32.358974
| 100
| 0.651347
|
86d900d72f46bde9bfc14a769cccbf5d93ab9e29
| 1,617
|
py
|
Python
|
problematic/peakfinders2d.py
|
stefsmeets/problematic
|
b9b5294d93f70b7c6fa755594ab1f6111e4bc383
|
[
"MIT"
] | 9
|
2018-09-10T04:29:50.000Z
|
2020-08-08T17:38:40.000Z
|
problematic/peakfinders2d.py
|
stefsmeets/problematic
|
b9b5294d93f70b7c6fa755594ab1f6111e4bc383
|
[
"MIT"
] | 5
|
2018-06-08T08:46:24.000Z
|
2022-02-15T08:15:47.000Z
|
problematic/peakfinders2d.py
|
stefsmeets/problematic
|
b9b5294d93f70b7c6fa755594ab1f6111e4bc383
|
[
"MIT"
] | 7
|
2019-07-16T16:18:05.000Z
|
2021-11-04T14:27:00.000Z
|
import numpy as np
import scipy.ndimage as ndi
NO_PEAKS = np.array([[np.nan, np.nan]])
def clean_peaks(peaks):
if len(peaks) == 0:
return NO_PEAKS
else:
return peaks
def find_peaks_regionprops(z, min_sigma=4, max_sigma=5, threshold=1,
min_size=50, return_props=False):
"""
Finds peaks using regionprops.
Uses the difference of two gaussian convolutions to separate signal from
background, and then uses the skimage.measure.regionprops function to find
connected islands (peaks). Small blobs can be rejected using `min_size`.
Parameters
----------
z : numpy.ndarray
Array of image intensities.
min_sigma : int, float
Standard deviation for the minimum gaussian convolution
max_sigma : int, float
Standard deviation for the maximum gaussian convolution
threshold : int, float
Minimum difference in intensity
min_size : int
Minimum size in pixels of blob
return_props : bool
Return skimage.measure.regionprops
Returns
-------
numpy.ndarray
(n_peaks, 2)
Array of peak coordinates.
"""
from skimage import morphology, measure
difference = ndi.gaussian_filter(z, min_sigma) - ndi.gaussian_filter(z, max_sigma)
labels, numlabels = ndi.label(difference > threshold)
labels = morphology.remove_small_objects(labels, min_size)
props = measure.regionprops(labels, z)
if return_props:
return props
else:
peaks = np.array([prop.centroid for prop in props])
return clean_peaks(peaks)
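# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# A quick sanity check for find_peaks_regionprops(): plant one bright blob on
# an otherwise empty image and confirm that a single centroid comes back near
# it. Image size, blob position and thresholds are arbitrary assumptions, and
# scikit-image must be installed (it is imported inside the function).
if __name__ == '__main__':
    image = np.zeros((128, 128))
    image[60:68, 40:48] = 100.0                     # one bright 8x8 blob
    peaks = find_peaks_regionprops(image, min_sigma=2, max_sigma=4,
                                    threshold=1, min_size=10)
    print(peaks)                                    # expected: one row near (63.5, 43.5)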
| 27.87931
| 86
| 0.667285
|
dba260aebb1e650a01e92add05c53be3ac93019e
| 4,554
|
py
|
Python
|
src/so.py
|
salembeats/alfred-stackoverflow
|
4faf08f0d03959ce784fb9c7a6c29adf503dfe11
|
[
"MIT"
] | 1
|
2019-12-18T03:07:25.000Z
|
2019-12-18T03:07:25.000Z
|
src/so.py
|
salembeats/alfred-stackoverflow
|
4faf08f0d03959ce784fb9c7a6c29adf503dfe11
|
[
"MIT"
] | null | null | null |
src/so.py
|
salembeats/alfred-stackoverflow
|
4faf08f0d03959ce784fb9c7a6c29adf503dfe11
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# encoding: utf-8
#
# Copyright © 2014 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-12-29
#
"""
Search StackOverflow API
"""
from __future__ import print_function, unicode_literals, absolute_import
import functools
from HTMLParser import HTMLParser
import re
import sys
from workflow import Workflow, web, ICON_WARNING
USER_AGENT = 'Alfred-StackOverflow/{version} ({url})'
UPDATE_SETTINGS = {'github_slug': 'deanishe/alfred-stackoverflow'}
ICON_ANSWERED = 'answered.png'
ICON_UPDATE = 'update-available.png'
# Shown in error logs. Users can find help here
HELP_URL = 'https://github.com/deanishe/alfred-stackoverflow'
# Can be any Stack Exchange site
SITE = 'stackoverflow'
# API endpoint for all Stack Exchange sites
API_URL = 'https://api.stackexchange.com/2.2/search'
# Number of results to fetch from API
RESULT_COUNT = 50
# How long to cache results for
CACHE_MAX_AGE = 20 # seconds
# h.unescape() turns HTML escapes back into real characters
h = HTMLParser()
log = None
def cache_key(query, tags):
"""Make filesystem-friendly cache key"""
key = query + '_' + ';'.join(tags)
key = key.lower()
key = re.sub(r'[^a-z0-9-_;\.]', '-', key)
key = re.sub(r'-+', '-', key)
log.debug('Cache key : {!r} {!r} -> {!r}'.format(query, tags, key))
return key
def handle_answer(api_dict):
"""Extract relevant info from API result"""
result = {}
for key in ('title', 'link', 'tags'):
result[key] = h.unescape(api_dict[key])
result['answered'] = api_dict['is_answered']
return result
def get_answers(query=None, tags=None, limit=RESULT_COUNT):
"""Return list of answers from API"""
headers = {}
headers['user-agent'] = USER_AGENT.format(version=wf.version,
url=wf.help_url)
params = {
'page': 1,
'pagesize': limit,
'order': 'desc',
'sort': 'relevance',
'site': SITE
}
if query:
params['intitle'] = query
if tags:
params['tagged'] = ';'.join(tags)
r = web.get(API_URL, params, headers=headers)
log.debug('[{}] {}'.format(r.status_code, r.url))
r.raise_for_status()
data = r.json()
results = [handle_answer(d) for d in data['items']]
# Sort with answered first
answered = []
unanswered = []
for d in results:
if d['answered']:
answered.append(d)
else:
unanswered.append(d)
return answered + unanswered
def main(wf):
# Update available?
if wf.update_available:
wf.add_item('A newer version is available',
'↩ to install update',
autocomplete='workflow:update',
icon=ICON_UPDATE)
query = wf.args[0].strip()
# Tag prefix only. Treat as blank query
if query == '.':
query = ''
log.debug('query : {!r}'.format(query))
if not query:
wf.add_item('Search StackOverflow')
wf.send_feedback()
return 0
# Parse query into query string and tags
words = query.split(' ')
query = []
tags = []
for word in words:
if word.startswith('.'):
if word != '.': # Ignore empty tags
tags.append(word[1:])
else:
query.append(word)
query = ' '.join(query)
key = cache_key(query, tags)
# Fetch answers from API
answers = wf.cached_data(key, functools.partial(get_answers, query, tags),
max_age=CACHE_MAX_AGE)
log.debug('{} answers for {!r}, tagged {!r}'.format(len(answers),
query,
tags))
# Show results
if not answers:
wf.add_item('No matching answers found',
'Try a different query',
icon=ICON_WARNING)
for answer in answers:
if answer['answered']:
icon = ICON_ANSWERED
else:
icon = 'icon.png'
wf.add_item(answer['title'],
', '.join(answer['tags']),
arg=answer['link'],
valid=True,
largetext=answer['title'],
icon=icon)
# log.debug(answer)
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow(help_url=HELP_URL,
update_settings=UPDATE_SETTINGS)
log = wf.logger
sys.exit(wf.run(main))
| 24.095238
| 78
| 0.567633
|
da3c7b1852e0544c8dfab3969fe2fbaa46017c4d
| 1,039
|
py
|
Python
|
scripts/mtDNA/split_mtdna_by_genes.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
scripts/mtDNA/split_mtdna_by_genes.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
scripts/mtDNA/split_mtdna_by_genes.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import os
import argparse
from Bio import SeqIO
from RouToolPa.GeneralRoutines import FileRoutines
from RouToolPa.Routines import MtDNARoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
type=lambda s: FileRoutines.make_list_of_path_to_files(s.split(",")),
help="Comma separated list of genbank files/directories")
#parser.add_argument("-o", "--output_file_prefix", action="store", dest="output_prefix", required=True,
# help="Prefix of output files")
parser.add_argument("-f", "--format", action="store", dest="format", default="genbank",
help="Format of input and output file. Allowed formats genbank(default), fasta")
args = parser.parse_args()
record_dict = SeqIO.index_db("tmp.idx", args.input, format=args.format)
MtDNARoutines.split_mitochondrion_genome_by_genes(record_dict, black_list=[])
os.remove("tmp.idx")
| 37.107143
| 103
| 0.708373
|
03b53f72d734af70048764b087c81f0c1e47a090
| 16
|
py
|
Python
|
custom_components/apc-home/typings/__init__.py
|
xannor/ha-apc-home
|
ee7198b5bb527596ff8eb6a060ef80b889243d29
|
[
"Unlicense"
] | null | null | null |
custom_components/apc-home/typings/__init__.py
|
xannor/ha-apc-home
|
ee7198b5bb527596ff8eb6a060ef80b889243d29
|
[
"Unlicense"
] | null | null | null |
custom_components/apc-home/typings/__init__.py
|
xannor/ha-apc-home
|
ee7198b5bb527596ff8eb6a060ef80b889243d29
|
[
"Unlicense"
] | null | null | null |
""" Typings """
| 8
| 15
| 0.4375
|
2339bab622eebfa9429efa3aa9019a393fa81e8b
| 1,840
|
py
|
Python
|
scripts/bootstrap_filter_demo.py
|
karalleyna/pyprobml
|
72195e46fdffc4418910e76d02e3d6469f4ce272
|
[
"MIT"
] | null | null | null |
scripts/bootstrap_filter_demo.py
|
karalleyna/pyprobml
|
72195e46fdffc4418910e76d02e3d6469f4ce272
|
[
"MIT"
] | null | null | null |
scripts/bootstrap_filter_demo.py
|
karalleyna/pyprobml
|
72195e46fdffc4418910e76d02e3d6469f4ce272
|
[
"MIT"
] | null | null | null |
# Demo of the bootstrap filter under a
# nonlinear discrete system
# Author: Gerardo Durán-Martín (@gerdm)
import jax
import nlds_lib as ds
import jax.numpy as jnp
import matplotlib.pyplot as plt
from jax import random
import pyprobml_utils as pml
def plot_samples(sample_state, sample_obs, ax=None):
fig, ax = plt.subplots()
ax.plot(*sample_state.T, label="state space")
ax.scatter(*sample_obs.T, s=60, c="tab:green", marker="+")
ax.scatter(*sample_state[0], c="black", zorder=3)
ax.legend()
ax.set_title("Noisy observations from hidden trajectory")
plt.axis("equal")
def plot_inference(sample_obs, mean_hist):
fig, ax = plt.subplots()
ax.scatter(*sample_obs.T, marker="+", color="tab:green", s=60)
ax.plot(*mean_hist.T, c="tab:orange", label="filtered")
ax.scatter(*mean_hist[0], c="black", zorder=3)
plt.legend()
plt.axis("equal")
if __name__ == "__main__":
key = random.PRNGKey(314)
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def fz(x, dt): return x + dt * jnp.array([jnp.sin(x[1]), jnp.cos(x[0])])
def fx(x): return x
dt = 0.4
nsteps = 100
# Initial state vector
x0 = jnp.array([1.5, 0.0])
# State noise
Qt = jnp.eye(2) * 0.001
# Observed noise
Rt = jnp.eye(2) * 0.05
key = random.PRNGKey(314)
model = ds.NLDS(lambda x: fz(x, dt), fx, Qt, Rt)
sample_state, sample_obs = model.sample(key, x0, nsteps)
fz_vec = jax.vmap(fz, in_axes=(0, None))
particle_filter = ds.BootstrapFiltering(lambda x: fz_vec(x, dt), fx, Qt, Rt)
pf_mean = particle_filter.filter(key, x0, sample_obs)
plot_inference(sample_obs, pf_mean)
pml.savefig("nlds2d_bootstrap.pdf")
plot_samples(sample_state, sample_obs)
pml.savefig("nlds2d_data.pdf")
plt.show()
| 28.307692
| 80
| 0.659783
|
14d428b15ebcb7d3e882314754e363ef45b6ccd1
| 1,147
|
py
|
Python
|
core/commands/admin/info_group.py
|
TheLonelyAdventurer/nebula8
|
e8fd48f91ce510bc1ebe4be38546c11afa8d2fa5
|
[
"Apache-2.0"
] | 19
|
2020-12-24T02:47:41.000Z
|
2022-03-18T07:33:52.000Z
|
core/commands/admin/info_group.py
|
TheLonelyAdventurer/nebula8
|
e8fd48f91ce510bc1ebe4be38546c11afa8d2fa5
|
[
"Apache-2.0"
] | 22
|
2020-09-29T04:56:29.000Z
|
2022-03-18T14:10:27.000Z
|
core/commands/admin/info_group.py
|
TheLonelyAdventurer/nebula8
|
e8fd48f91ce510bc1ebe4be38546c11afa8d2fa5
|
[
"Apache-2.0"
] | 15
|
2020-09-29T04:56:37.000Z
|
2022-01-24T15:39:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
from core.database.repository.group import GroupRepository
from languages.getLang import languages
from core.utilities.message import message
from core.handlers.welcome import save_group
from core import decorators
@decorators.admin.user_admin
@decorators.bot.check_is_admin
@decorators.public.init
@decorators.bot.check_can_delete
@decorators.delete.init
def init(update, context):
languages(update,context)
chat = update.effective_message.chat_id
chat_title = update.effective_chat.title
record = GroupRepository.SET_GROUP_NAME
row = GroupRepository().getById([chat])
if row:
data = [(chat_title, chat)]
GroupRepository().update_group_settings(record, data)
counter = GroupRepository().getUpdatesByChat(chat)
message(update,context,languages.group_info.format(
row['group_name'],
row['id_group'],
row['welcome_text'],
row['rules_text'],
row['languages'],
row['max_warn'],
counter['counter']))
else:
save_group(update)
| 32.771429
| 61
| 0.694856
|
3d8f4eec3191e69c6ca44d056b29605f41c85dd2
| 2,593
|
py
|
Python
|
openvision/datasets/sythtextprovider.py
|
liuzz1983/open_vision
|
f346e2f789944ea590c1d263e72a6e93490bb3a0
|
[
"MIT"
] | null | null | null |
openvision/datasets/sythtextprovider.py
|
liuzz1983/open_vision
|
f346e2f789944ea590c1d263e72a6e93490bb3a0
|
[
"MIT"
] | null | null | null |
openvision/datasets/sythtextprovider.py
|
liuzz1983/open_vision
|
f346e2f789944ea590c1d263e72a6e93490bb3a0
|
[
"MIT"
] | null | null | null |
## an initial version
## Transform the tfrecord to slim data provider format
import numpy
import tensorflow as tf
import os
slim = tf.contrib.slim
ITEMS_TO_DESCRIPTIONS = {
'image': 'slim.tfexample_decoder.Image',
'shape': 'shape',
'height': 'height',
'width': 'width',
'object/bbox': 'box',
'object/label': 'label'
}
SPLITS_TO_SIZES = {
'train': 858750,
}
NUM_CLASSES = 2
def get_datasets(data_dir,file_pattern = '*.tfrecord'):
file_patterns = os.path.join(data_dir, file_pattern)
print 'file_path: {}'.format(file_patterns)
reader = tf.TFRecordReader
keys_to_features = {
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/format': tf.FixedLenFeature([], tf.string, default_value='jpeg'),
'image/encoded': tf.FixedLenFeature([], tf.string, default_value=''),
'image/name': tf.VarLenFeature(dtype = tf.string),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
#'image': slim.tfexample_decoder.Tensor('image/encoded'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'height': slim.tfexample_decoder.Tensor('image/height'),
'width': slim.tfexample_decoder.Tensor('image/width'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
[ 'xmin', 'ymin','xmax', 'ymax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
#'imaname': slim.tfexample_decoder.Tensor('image/name'),
#'objext/txt': slim.tfexample_decoder.Tensor('image/object/bbox/label_text'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
return slim.dataset.Dataset(
data_sources=file_patterns,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES['train'],
items_to_descriptions=ITEMS_TO_DESCRIPTIONS,
num_classes=NUM_CLASSES,
labels_to_names=labels_to_names)
| 36.013889
| 85
| 0.66371
|
45cf0eeb9178cbf1cf37b6ac7dbed185d7f842b7
| 4,009
|
py
|
Python
|
basic_test/run_glove_test.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 31
|
2019-10-07T03:05:39.000Z
|
2020-06-17T01:34:21.000Z
|
basic_test/run_glove_test.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 7
|
2019-10-16T13:37:52.000Z
|
2020-03-30T03:40:56.000Z
|
basic_test/run_glove_test.py
|
yangheng95/LCF-ABSA
|
0eeb4788269a498d34c2aff942e03af78026617e
|
[
"MIT"
] | 3
|
2020-01-12T13:03:35.000Z
|
2020-06-11T08:26:01.000Z
|
# -*- coding: utf-8 -*-
# file: run_glove_test.py
# time: 2021/12/4
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import shutil
from torch import cuda
from pyabsa import APCModelList, BERTBaselineAPCModelList, GloVeAPCModelList, \
ATEPCModelList, BERTClassificationModelList, GloVeClassificationModelList
from pyabsa import ABSADatasetList, ClassificationDatasetList
from pyabsa import APCConfigManager
from pyabsa import ATEPCConfigManager
from pyabsa import ClassificationConfigManager
from pyabsa.functional import Trainer
from findfile import find_cwd_dir
import warnings
warnings.filterwarnings('ignore')
#######################################################################################################
# This script is used for basic test #
# The config test are ignored due to computation limitation #
#######################################################################################################
atepc_examples = ['But the staff was so nice to us .',
'But the staff was so horrible to us .',
r'Not only was the food outstanding , but the little ` perks \' were great .',
'It took half an hour to get our check , which was perfect since we could sit , have drinks and talk !',
'It was pleasantly uncrowded , the service was delightful , the garden adorable , '
'the food -LRB- from appetizers to entrees -RRB- was delectable .',
'How pretentious and inappropriate for MJ Grill to claim that it provides power lunch and dinners !'
]
apc_examples = [
'Strong build though which really adds to its [ASP]durability[ASP] .', # !sent! Positive
'Strong [ASP]build[ASP] though which really adds to its durability . !sent! Positive',
'The [ASP]battery life[ASP] is excellent - 6-7 hours without charging . !sent! Positive',
'I have had my computer for 2 weeks already and it [ASP]works[ASP] perfectly . !sent! Positive',
'And I may be the only one but I am really liking [ASP]Windows 8[ASP] . !sent! Positive',
]
# # for dataset in ABSADatasetList():
for dataset in ABSADatasetList()[:1]:
for model in GloVeAPCModelList():
config = APCConfigManager.get_apc_config_english()
cuda.empty_cache()
config.model = model
config.cache_dataset = True
config.num_epoch = 1
config.evaluate_begin = 0
config.log_step = -1
sent_classifier = Trainer(config=config,
dataset=dataset,
checkpoint_save_mode=2,
auto_device='allcuda'
).load_trained_model()
for ex in apc_examples:
result = sent_classifier.infer(ex, print_result=True, ignore_error=False)
try:
shutil.rmtree(find_cwd_dir('checkpoints'))
del sent_classifier
cuda.empty_cache()
except Exception as e:
print(e)
for dataset in ClassificationDatasetList():
for model in GloVeClassificationModelList():
config = ClassificationConfigManager.get_classification_config_english()
config.model = model
config.num_epoch = 1
config.evaluate_begin = 0
config.log_step = -1
text_classifier = Trainer(config=config,
dataset=dataset,
checkpoint_save_mode=1,
auto_device='allcuda'
).load_trained_model()
text_classifier.infer('I love it very much!')
try:
shutil.rmtree(find_cwd_dir('checkpoints'))
del text_classifier
cuda.empty_cache()
except Exception as e:
print(e)
| 43.576087
| 122
| 0.583188
|
3e17ac7d0be1260e528611f516f0b11b677e20c3
| 419
|
py
|
Python
|
3-mouth04/day05/mysite3/otm/models.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | 4
|
2021-02-01T10:28:11.000Z
|
2021-02-01T10:34:40.000Z
|
3-mouth04/day05/mysite3/otm/models.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
3-mouth04/day05/mysite3/otm/models.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Publisher(models.Model):
name = models.CharField('名称', max_length=50, unique=True)
def __str__(self):
return self.name
class Book(models.Model):
title = models.CharField('书名', max_length=50)
publisher = models.ForeignKey(Publisher, verbose_name='出版社', on_delete=models.CASCADE)
def __str__(self):
return self.title
| 23.277778
| 90
| 0.701671
|
8657f3e1d2a87659e5b10d266266fd37c618f675
| 3,700
|
py
|
Python
|
official/vision/serving/export_saved_model_lib_v2.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | 1
|
2020-09-14T10:46:07.000Z
|
2020-09-14T10:46:07.000Z
|
official/vision/serving/export_saved_model_lib_v2.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | null | null | null |
official/vision/serving/export_saved_model_lib_v2.py
|
wnorris/models
|
a5e4965d1f4e4b02d51aa344336b6fff53af7c17
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export utility function for serving/inference."""
import os
from typing import Optional, List, Union, Text, Dict
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.core import train_utils
from official.vision.serving import export_module_factory
def export(
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_dir: str,
num_channels: Optional[int] = 3,
export_module: Optional[export_base.ExportModule] = None,
export_checkpoint_subdir: Optional[str] = None,
export_saved_model_subdir: Optional[str] = None,
function_keys: Optional[Union[List[Text], Dict[Text, Text]]] = None,
save_options: Optional[tf.saved_model.SaveOptions] = None):
"""Exports the model specified in the exp config.
Saved model is stored at export_dir/saved_model, checkpoint is saved
at export_dir/checkpoint, and params is saved at export_dir/params.yaml.
Args:
input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
batch_size: 'int', or None.
input_image_size: List or Tuple of height and width.
params: Experiment params.
checkpoint_path: Trained checkpoint path or directory.
export_dir: Export directory path.
num_channels: The number of input image channels.
export_module: Optional export module to be used instead of using params
to create one. If None, the params will be used to create an export
module.
export_checkpoint_subdir: Optional subdirectory under export_dir
to store checkpoint.
export_saved_model_subdir: Optional subdirectory under export_dir
to store saved model.
function_keys: a list of string keys to retrieve pre-defined serving
      signatures. The signature keys will be set with defaults. If a dictionary
is provided, the values will be used as signature keys.
save_options: `SaveOptions` for `tf.saved_model.save`.
"""
if export_checkpoint_subdir:
output_checkpoint_directory = os.path.join(
export_dir, export_checkpoint_subdir)
else:
output_checkpoint_directory = None
if export_saved_model_subdir:
output_saved_model_directory = os.path.join(
export_dir, export_saved_model_subdir)
else:
output_saved_model_directory = export_dir
export_module = export_module_factory.get_export_module(
params,
input_type=input_type,
batch_size=batch_size,
input_image_size=input_image_size,
num_channels=num_channels)
export_base.export(
export_module,
function_keys=function_keys if function_keys else [input_type],
export_savedmodel_dir=output_saved_model_directory,
checkpoint_path=checkpoint_path,
timestamped=False,
save_options=save_options)
if output_checkpoint_directory:
ckpt = tf.train.Checkpoint(model=export_module.model)
ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
train_utils.serialize_config(params, export_dir)
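# --- Editor's note: illustrative sketch, not part of the original file. ---
# Typical call shape for export(). The checkpoint path and export directory
# are placeholders (assumptions), and a real cfg.ExperimentConfig is required,
# so this helper is defined for illustration only and never invoked here.
def _example_export(params: cfg.ExperimentConfig):
  """Sketch: export a trained vision model for `image_tensor` serving."""
  export(
      input_type='image_tensor',
      batch_size=1,
      input_image_size=[224, 224],
      params=params,
      checkpoint_path='/path/to/ckpt',        # placeholder
      export_dir='/path/to/export',           # placeholder
      export_checkpoint_subdir='checkpoint',
      export_saved_model_subdir='saved_model')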
| 37.755102
| 78
| 0.757297
|
54597a53bed51356ff6b39df928cb250f6847261
| 806
|
py
|
Python
|
tests/repository/test_in_memory_repos.py
|
banillie/bcompiler-engine
|
26b63b6e630e2925175ffff6b48b42d70f7ba544
|
[
"MIT"
] | null | null | null |
tests/repository/test_in_memory_repos.py
|
banillie/bcompiler-engine
|
26b63b6e630e2925175ffff6b48b42d70f7ba544
|
[
"MIT"
] | null | null | null |
tests/repository/test_in_memory_repos.py
|
banillie/bcompiler-engine
|
26b63b6e630e2925175ffff6b48b42d70f7ba544
|
[
"MIT"
] | null | null | null |
# type: ignore
import json
import pytest
from engine.exceptions import DatamapNotCSVException
from engine.repository.datamap import InMemorySingleDatamapRepository
def test_datamapline_repository_single_file_repo(datamap, datamapline_list_objects):
repo = InMemorySingleDatamapRepository(datamap)
assert repo.list_as_objs()[0].key == datamapline_list_objects[0].key
assert repo.list_as_objs()[0].sheet == datamapline_list_objects[0].sheet
assert json.loads(repo.list_as_json())[0]["key"] == "Project/Programme Name"
def test_datamapline_repository_non_existant_file(datamapline_list_objects):
with pytest.raises(DatamapNotCSVException):
        repo = InMemorySingleDatamapRepository("non-file.txt")  # noqa
repo.list_as_objs()[0].key == datamapline_list_objects[0].key
| 36.636364
| 84
| 0.792804
|
a4aab6782810f9c148100e4c48422f5a4b7c3c28
| 3,356
|
py
|
Python
|
evaluation.py
|
Ary80/prova
|
059b814da402a32ce5e510af1b7377ad09bf4422
|
[
"MIT"
] | 10
|
2018-07-26T15:01:55.000Z
|
2022-03-23T13:45:20.000Z
|
evaluation.py
|
Ary80/prova
|
059b814da402a32ce5e510af1b7377ad09bf4422
|
[
"MIT"
] | 1
|
2019-08-12T11:43:19.000Z
|
2019-08-12T11:43:19.000Z
|
evaluation.py
|
Ary80/prova
|
059b814da402a32ce5e510af1b7377ad09bf4422
|
[
"MIT"
] | 1
|
2020-02-08T00:55:20.000Z
|
2020-02-08T00:55:20.000Z
|
import numpy as np
from scipy.stats import spearmanr
from scipy.spatial.distance import cosine
import Levenshtein as levenshtein
import collections
def task_accuracy_metrics(reward_list):
""" Accuracy as percentage of examples that received rewards """
accuracy = sum(reward_list)*100/float(len(reward_list))
print("Total Reward: %s, Accuracy: %s %%"%(sum(reward_list),accuracy))
return accuracy
def action_distribution(action_list):
print(action_list)
counter = collections.Counter(action_list)
return counter
def levenshtein_message_distance(m1, m2):
""" Use python-levenshtein package to calculate edit distance """
return levenshtein.distance(m1,m2)
def message_sequence_to_alphabet(message, alphabet):
if type(message[0]) is np.int64:
return alphabet[int(message[0])]
elif type(message[0]) is list:
return "".join(alphabet[int(idx)] for idx in message[0])
else:
return "".join(alphabet[int(idx)] for idx in message)
def topographic_similarity(input_vectors, messages):
"""
Calculate negative spearman correlation between message levenshtein distances
and cosine similarities of input vectors
"""
## Calculate levenshtein distance between all message pairs
message_similarities = []
for idx, message in enumerate(messages):
other_messages = messages
other_messages.pop(idx)
for other_message in messages:
lev_dist = levenshtein_message_distance(message, other_message)
message_similarities.append(lev_dist)
## Calculate cosine similarity of target and chosen vectors
input_vect_similarities = []
for idx, input_vect in enumerate(input_vectors):
other_input_vectors = input_vectors
other_input_vectors.pop(idx)
for other_input in other_input_vectors:
cos_dist = cosine(input_vect,other_input)
input_vect_similarities.append(cos_dist)
## Calculate negative Spearman correlation between message distances and vector similarities
rho = spearmanr(message_similarities,input_vect_similarities)
return - rho.correlation
def obtain_metrics(training_stats, config_dict):
""" Compute metrics given trianing stats list of dicts"""
metrics = {}
## Accuracy
reward_list = [e['reward'] for e in training_stats]
metrics['accuracy'] = task_accuracy_metrics(reward_list)
## Speaker action distribution
message_list = []
for e in training_stats:
for m in e["message"]:
## Variable message lengths
if type(m) is np.int64:
message_list.append(m)
else:
for m_ in m:
message_list.append(m_)
metrics["speaker_action_dist"] = action_distribution(message_list)
print("Speaker action distribution: %s"%(metrics["speaker_action_dist"]))
## Listener action distribution
action_list = []
for e in training_stats:
print(e["chosen_target_idx"])
for t in e["chosen_target_idx"]:
action_list.append(t)
metrics["listener_action_dist"] = action_distribution(action_list)
print("Listener action distribution: %s"%(metrics["listener_action_dist"]))
## Topographic similarity
input_vectors = [e['input'] for e in training_stats]
messages = [message_sequence_to_alphabet(e['message'], config_dict['alphabet']) for e in training_stats]
metrics['topographical_sim'] = topographic_similarity(input_vectors, messages)
print("Topographical Similarity: %s"%(metrics['topographical_sim']))
return metrics
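# --- Editor's note: illustrative sketch, not part of the original file. ---
# The quantity computed by topographic_similarity() is the negative Spearman
# correlation between pairwise Levenshtein distances of messages and pairwise
# cosine distances of the corresponding inputs. The three toy inputs and
# messages below are assumptions; with this data the printed value is roughly
# -0.87, following the sign convention of the function above.
if __name__ == '__main__':
    toy_inputs = [[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]]
    toy_messages = ['aa', 'ab', 'bb']
    lev_dists, cos_dists = [], []
    for i in range(len(toy_messages)):
        for j in range(i + 1, len(toy_messages)):
            lev_dists.append(levenshtein.distance(toy_messages[i], toy_messages[j]))
            cos_dists.append(cosine(toy_inputs[i], toy_inputs[j]))
    print(-spearmanr(lev_dists, cos_dists).correlation)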
| 21.651613
| 105
| 0.757747
|
1cc97f931bd0094dc920a8a7034acb8b13463142
| 2,201
|
py
|
Python
|
src/utils/shell.py
|
sota/old-lang
|
4e7c93699a48f102f0d677a47bdfcf102d7a7db7
|
[
"MIT"
] | 1
|
2020-05-05T20:51:16.000Z
|
2020-05-05T20:51:16.000Z
|
src/utils/shell.py
|
sota/old-lang
|
4e7c93699a48f102f0d677a47bdfcf102d7a7db7
|
[
"MIT"
] | null | null | null |
src/utils/shell.py
|
sota/old-lang
|
4e7c93699a48f102f0d677a47bdfcf102d7a7db7
|
[
"MIT"
] | null | null | null |
'''
shell utilities
'''
import os
import re
import sys
import fnmatch
from subprocess import Popen, PIPE, CalledProcessError
from contextlib import contextmanager
#pylint: disable=invalid-name
def expandpath(path):
return os.path.realpath(os.path.expanduser(path))
def inversepath(path):
return '/'.join(['..' for _ in path.split('/')])
@contextmanager
def cd(*args, **kwargs):
mkdir = kwargs.pop('mkdir', True)
verbose = kwargs.pop('verbose', False)
path = os.path.sep.join(args)
path = os.path.normpath(path)
path = os.path.expanduser(path)
prev = os.getcwd()
if path != prev:
if mkdir:
call('mkdir -p %(path)s' % locals(), verbose=verbose)
os.chdir(path)
curr = os.getcwd()
sys.path.append(curr)
if verbose:
print 'cd %s' % curr
try:
yield
finally:
if path != prev:
sys.path.remove(curr)
os.chdir(prev)
if verbose:
print 'cd %s' % prev
def call(cmd, stdout=PIPE, stderr=PIPE, shell=True, nerf=False, throw=True, verbose=False):
if verbose or nerf:
print cmd
if nerf:
return (None, 'nerfed', 'nerfed')
process = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
stdout, stderr = process.communicate()
exitcode = process.poll()
if verbose:
if stdout:
print stdout
if stderr:
print stderr
if throw and exitcode:
raise CalledProcessError(exitcode, 'cmd=%(cmd)s; stdout=%(stdout)s; stderr=%(stderr)s' % locals())
return exitcode, stdout, stderr
def rglob(pattern):
matches = []
# support for shell-like {x,y} syntax
regex = re.compile('(.*){(.*)}(.*)')
match = regex.search(pattern)
if match:
prefix, alternates, suffix = match.groups()
for alternate in alternates.split(','):
matches += rglob(prefix + alternate.strip() + suffix)
return matches
# support for recursive glob
for r, _, fs in os.walk(os.path.dirname(pattern)):
for f in fnmatch.filter(fs, os.path.basename(pattern)):
matches.append(os.path.join(r, f))
return matches
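# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# cd() temporarily switches (and, by default, creates) a working directory,
# and rglob() expands a shell-like {a,b} alternation before walking
# directories recursively. The /tmp path below is an assumption, and the
# snippet keeps the module's Python 2 print syntax.
if __name__ == '__main__':
    with cd('/tmp/shell_utils_demo'):
        open('example.py', 'w').close()
    found = rglob('/tmp/shell_utils_demo/{example,missing}*.py')
    print found   # expected: ['/tmp/shell_utils_demo/example.py']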
| 28.584416
| 106
| 0.602453
|
8018b516b1c8b66eecc02b4e133da97ff6345a81
| 787
|
py
|
Python
|
简单本地端口扫描[线程版].py
|
hzeyuan/100-Python
|
9def809d5a7c49bc63308d97ebbedf64cdca7ab4
|
[
"Apache-2.0"
] | 7
|
2021-02-11T07:40:57.000Z
|
2022-01-23T11:24:31.000Z
|
简单本地端口扫描[线程版].py
|
hzeyuan/100-Python
|
9def809d5a7c49bc63308d97ebbedf64cdca7ab4
|
[
"Apache-2.0"
] | null | null | null |
简单本地端口扫描[线程版].py
|
hzeyuan/100-Python
|
9def809d5a7c49bc63308d97ebbedf64cdca7ab4
|
[
"Apache-2.0"
] | 3
|
2020-01-25T08:45:55.000Z
|
2021-01-21T14:51:51.000Z
|
import socket
import threading
def get_ip_status(lock, ip, port):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.settimeout(1)
try:
result = server.connect_ex((ip, port))
with lock:
if result == 0:
print('{0} port {1} is open'.format(ip, port))
except Exception as err:
pass
finally:
server.close()
if __name__ == '__main__':
import time
start = time.time()
name = socket.getfqdn(socket.gethostname())
host = socket.gethostbyname(name)
port_range = (0, 10000)
lock = threading.Lock()
for port in range(port_range[0], port_range[1]):
t = threading.Thread(target=get_ip_status, args=(lock, host, port))
t.start()
print(time.time()-start)
| 26.233333
| 75
| 0.613723
|
1953c0d4c76c56f67fec68d55402b347aabe4e76
| 7,472
|
py
|
Python
|
Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
import os
import re
import ROOT
import sys
from .TkAlExceptions import AllInOneError
import CondCore.Utilities.conddblib as conddblib
import six
####################--- Helpers ---############################
def replaceByMap(target, the_map):
"""This function replaces `.oO[key]Oo.` by `the_map[key]` in target.
Arguments:
- `target`: String which contains symbolic tags of the form `.oO[key]Oo.`
- `the_map`: Dictionary which has to contain the `key`s in `target` as keys
"""
result = target
for key in the_map:
lifeSaver = 10e3
iteration = 0
while ".oO[" in result and "]Oo." in result:
for key in the_map:
try:
result = result.replace(".oO["+key+"]Oo.",the_map[key])
except TypeError: #try a dict
try:
for keykey, value in six.iteritems(the_map[key]):
result = result.replace(".oO[" + key + "['" + keykey + "']]Oo.", value)
result = result.replace(".oO[" + key + '["' + keykey + '"]]Oo.', value)
except AttributeError: #try a list
try:
for index, value in enumerate(the_map[key]):
result = result.replace(".oO[" + key + "[" + str(index) + "]]Oo.", value)
except TypeError:
raise TypeError("Something is wrong in replaceByMap! Need a string, dict, or list, but the_map(%s)=%s!"%(repr(key), repr(the_map[key])))
iteration += 1
if iteration > lifeSaver:
problematicLines = ""
for line in result.splitlines():
if ".oO[" in result and "]Oo." in line:
problematicLines += "%s\n"%line
msg = ("Oh Dear, there seems to be an endless loop in "
"replaceByMap!!\n%s\n%s"%(problematicLines, the_map))
raise AllInOneError(msg)
return result
def getCommandOutput2(command):
"""This function executes `command` and returns it output.
Arguments:
- `command`: Shell command to be invoked by this function.
"""
child = os.popen(command)
data = child.read()
err = child.close()
if err:
raise RuntimeError('%s failed w/ exit code %d' % (command, err))
return data
def castorDirExists(path):
"""This function checks if the directory given by `path` exists.
Arguments:
- `path`: Path to castor directory
"""
if path[-1] == "/":
path = path[:-1]
containingPath = os.path.join( *path.split("/")[:-1] )
dirInQuestion = path.split("/")[-1]
try:
rawLines = getCommandOutput2("rfdir /"+containingPath).splitlines()
except RuntimeError:
return False
for line in rawLines:
if line.split()[0][0] == "d":
if line.split()[8] == dirInQuestion:
return True
return False
def replacelast(string, old, new, count = 1):
"""Replace the last occurances of a string"""
return new.join(string.rsplit(old,count))
fileExtensions = ["_cfg.py", ".sh", ".root"]
def addIndex(filename, njobs, index = None):
if index is None:
return [addIndex(filename, njobs, i) for i in range(njobs)]
if njobs == 1:
return filename
fileExtension = None
for extension in fileExtensions:
if filename.endswith(extension):
fileExtension = extension
if fileExtension is None:
        raise AllInOneError(filename + " does not end with any of the extensions "
+ str(fileExtensions))
return replacelast(filename, fileExtension, "_" + str(index) + fileExtension)
def parsecolor(color):
try: #simplest case: it's an int
return int(color)
except ValueError:
pass
try: #kRed, kBlue, ...
color = str(getattr(ROOT, color))
return int(color)
except (AttributeError, ValueError):
pass
if color.count("+") + color.count("-") == 1: #kRed+5, kGreen-2
if "+" in color: #don't want to deal with nonassociativity of -
split = color.split("+")
color1 = parsecolor(split[0])
color2 = parsecolor(split[1])
return color1 + color2
if "-" in color:
split = color.split("-")
color1 = parsecolor(split[0])
color2 = parsecolor(split[1])
return color1 - color2
raise AllInOneError("color has to be an integer, a ROOT constant (kRed, kBlue, ...), or a two-term sum or difference (kGreen-5)!")
def parsestyle(style):
try: #simplest case: it's an int
return int(style)
except ValueError:
pass
try: #kStar, kDot, ...
style = str(getattr(ROOT,style))
return int(style)
except (AttributeError, ValueError):
pass
raise AllInOneError("style has to be an integer or a ROOT constant (kDashed, kStar, ...)!")
def recursivesubclasses(cls):
result = [cls]
for subcls in cls.__subclasses__():
result += recursivesubclasses(subcls)
return result
def cache(function):
cache = {}
def newfunction(*args, **kwargs):
try:
return cache[args, tuple(sorted(six.iteritems(kwargs)))]
except TypeError:
print(args, tuple(sorted(six.iteritems(kwargs))))
raise
except KeyError:
cache[args, tuple(sorted(six.iteritems(kwargs)))] = function(*args, **kwargs)
return newfunction(*args, **kwargs)
newfunction.__name__ = function.__name__
return newfunction
def boolfromstring(string, name):
"""
Takes a string from the configuration file
and makes it into a bool
"""
#try as a string, not case sensitive
if string.lower() == "true": return True
if string.lower() == "false": return False
#try as a number
try:
return str(bool(int(string)))
except ValueError:
pass
#out of options
raise ValueError("{} has to be true or false!".format(name))
def pythonboolstring(string, name):
"""
Takes a string from the configuration file
and makes it into a bool string for a python template
"""
return str(boolfromstring(string, name))
def cppboolstring(string, name):
"""
Takes a string from the configuration file
and makes it into a bool string for a C++ template
"""
return pythonboolstring(string, name).lower()
def getTagsMap(db):
con = conddblib.connect(url = conddblib.make_url(db))
session = con.session()
TAG = session.get_dbtype(conddblib.Tag)
dictionary = {}
for i in range(0,len(session.query(TAG.object_type).order_by(TAG.name).all())):
q1 = session.query(TAG.object_type).order_by(TAG.name).all()[i][0]
q2 = session.query(TAG.name).order_by(TAG.name).all()[i][0]
dictionary[q1]=q2
return dictionary
def clean_name(s):
"""Transforms a string into a valid variable or method name.
Arguments:
- `s`: input string
"""
# Remove invalid characters
s = re.sub(r"[^0-9a-zA-Z_]", "", s)
# Remove leading characters until we find a letter or underscore
s = re.sub(r"^[^a-zA-Z_]+", "", s)
return s
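# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# replaceByMap() keeps substituting ".oO[key]Oo." tags until none remain, so a
# value may itself expand into text containing further tags. The template and
# map below are assumptions used only for illustration.
if __name__ == '__main__':
    template = "output: .oO[datadir]Oo./.oO[name]Oo."
    the_map = {"datadir": "/store/user/.oO[user]Oo.",
               "user": "jdoe",
               "name": "run1"}
    print(replaceByMap(template, the_map))   # -> output: /store/user/jdoe/run1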
| 32.9163
| 165
| 0.582173
|
558602d6dd89475d17bd9a82d3c7cd24e9729e36
| 2,258
|
py
|
Python
|
benchmarks/template_server.py
|
AltaverseDAO/bittensor
|
ce0e6887d7e2b279b168949d4e7730981db8de6f
|
[
"MIT"
] | 2
|
2020-03-11T19:40:05.000Z
|
2020-06-14T19:35:34.000Z
|
benchmarks/template_server.py
|
AltaverseDAO/bittensor
|
ce0e6887d7e2b279b168949d4e7730981db8de6f
|
[
"MIT"
] | null | null | null |
benchmarks/template_server.py
|
AltaverseDAO/bittensor
|
ce0e6887d7e2b279b168949d4e7730981db8de6f
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" Benchmarking pytest fixture.
Example:
$ python3 benchmarks/template_server.py --neuron.model_name albert-base-v1
"""
from benchmarks import QueryBenchmark
import multiprocessing
import bittensor
class Benchmark ( QueryBenchmark ):
r""" Benchmark pytest class.
"""
@staticmethod
def miner_name() -> str:
r""" Return miner name
"""
return 'template_server'
@staticmethod
def run_neuron( config , subtensor, metagraph, wallet ):
r""" To be implemented in the subclass, runs the neuron.
Args:
config (bittensor.Config)
Run config
"""
        bittensor.neurons.text.template_server.neuron(config, subtensor=subtensor, metagraph=metagraph, wallet=wallet).run()
@staticmethod
def config() -> 'bittensor.Config':
r""" Return config
Returns:
config (bittensor.Config)
Run config.
"""
config = bittensor.neurons.text.template_server.neuron.config()
return config
if __name__ == '__main__':
benchmark = Benchmark()
benchmark.run()
| avg_line_length: 36.419355 | max_line_length: 123 | alphanum_fraction: 0.698849 |
| hexsha: e2353c5d276f86274743e88b0c7d583842557494 | size: 8992 | ext: py | lang: Python |
| max_stars_repo: wobbuuu/noisypy @ 3e4a48e55a391a9552da0df77f115e86ef100037 | path: noisypy/calibration_utils.py | licenses: ["MIT"] | stars: null |
| max_issues_repo: wobbuuu/noisypy @ 3e4a48e55a391a9552da0df77f115e86ef100037 | path: noisypy/calibration_utils.py | licenses: ["MIT"] | issues: null |
| max_forks_repo: wobbuuu/noisypy @ 3e4a48e55a391a9552da0df77f115e86ef100037 | path: noisypy/calibration_utils.py | licenses: ["MIT"] | forks: null |
import warnings
import numpy as np
import pandas as pd
import scipy as sp
from scipy.signal import savgol_filter
from matplotlib import pyplot as plt
import csaps
from .noisy_utils import det2power
from .settings import *
__all__ = ['calibrate', 'fit_calibration']
def calibrate(vbs, res_inds, det_inds, skip=[], name='./data/cond', both=True, det_ind=2,\
shift=1, r_set=None, preamp=None, r_wires=215/2, coef=None,\
r0=None, vmax=1e-3, sg_rpar=None, sg_det=None, sg_power=(9, 1), cs_smooth=None):
"""
ADD AUTOEXCLUSION OF BAD CURVES
Provides calibration data and calibration function
DC measurement in 3 probe geometry.
If 1 terminal of sample is grounded (DC), r_wires must be 0
"""
preamp = varz.preamp if not preamp else preamp
r_set = varz.r_set if not r_set else r_set
r0 = varz.r0 if not r0 else r0
coef = varz.coef if not coef else coef
r_set = np.atleast_1d(r_set)
if len(r_set) == 1:
r_set = r_set*np.ones(len(res_inds))
# Resistance vs Vtg
res = []
for j, ind in enumerate(res_inds):
vb = vbs[j]
df = pd.read_csv(name+str(ind)+'.dat', sep=' ', header=None)
if both:
df.loc[1::2,2::] = df.loc[1::2,:2:-1].values
for k in range(len(df)):
v = np.array(df.loc[k,2:].values/preamp, dtype='float64')
            # A shift typically occurs even in DC measurements
v = pd.Series(v).shift(-shift)
# Subtract preamp offset
v -= v[np.abs(vb).argmin()]
i = (vb*coef - v) / r_set[j]
inds = np.abs(v) < vmax
r = np.nan
if len(i[inds]) > 3:
r = np.polyfit(i[inds], v[inds], 1)[0]
res.append(r)
res = np.array(res)
# Detector vs Vtg
det = pd.DataFrame()
det_inds = set(det_inds) - set(skip)
for ind in det_inds:
df = pd.read_csv(name+str(ind)+'.dat', sep=' ', header=None)
df.set_index(1, inplace=True)
det = pd.concat([det, df[det_ind]], axis=1, ignore_index=True)
# Always start measurement from smallest resistance (largest detector signal)
# if det.index[0] < det.index[-1]:
# det = det[::-1]
# This is sometimes wrong
vtg = det.index
det = det.mean(axis=1)
# Subtract wires (3-probe measurements)
r_sample = max(res)
with warnings.catch_warnings():
warnings.simplefilter(action='ignore')
r_tr = res*r_sample / (r_sample-res)
rpar = 1 / (1/r_tr + 1/(r_sample-r_wires) + 2/r0)
calib_tg = pd.DataFrame({'vtg':vtg, 'res':res, 'rpar':rpar, 'det':det,\
'power':det2power(det)})
if calib_tg['res'].isna().any():
print("Following transistor gate values are skipped due to vmax threshold:\n[",\
end='')
to_print = calib_tg[calib_tg['res'].isna()]['vtg'].values
[print('{:.4g}'.format(value), end=' ') for value in to_print]
print("]")
calib_tg.dropna(inplace=True)
# # subtract offset
# fit_inds = calib_tg['rpar'] < 300
# if len(calib_tg.loc[fit_inds, 'rpar']) > 2:
# _, P0 = np.polyfit(calib_tg.loc[fit_inds, 'rpar'], calib_tg.loc[fit_inds, 'power'], 1)
# calib_tg['power'] -= P0
fig, ax = plt.subplots(1, 3, figsize=varz.figsize13)
ax[0].plot(calib_tg['vtg'], calib_tg['rpar'], '.')
ax[1].plot(calib_tg['vtg'], calib_tg['det'], '.')
ax[2].plot(calib_tg['rpar'], calib_tg['power'], '.', ms=3)
# savgol_filter on rpar vs Vtg
if sg_rpar:
calib_tg.sort_values(by='vtg', inplace=True)
vtg_reg = np.arange(calib_tg['vtg'].min(), calib_tg['vtg'].max(),\
calib_tg['vtg'].diff().min())
rpar_smooth = savgol_filter(np.interp(vtg_reg, calib_tg['vtg'],\
calib_tg['rpar']), *sg_rpar)
calib_tg['rpar'] = np.interp(calib_tg['vtg'], vtg_reg, rpar_smooth)
ax[0].plot(calib_tg['vtg'], calib_tg['rpar'])
# savgol_filter on det vs Vtg
if sg_det:
calib_tg.sort_values(by='vtg', inplace=True)
vtg_reg = np.arange(calib_tg['vtg'].min(), calib_tg['vtg'].max(),\
calib_tg['vtg'].diff().min())
det_smooth = savgol_filter(np.interp(vtg_reg, calib_tg['vtg'],\
calib_tg['det']), *sg_det)
calib_tg['det'] = np.interp(calib_tg['vtg'], vtg_reg, det_smooth)
ax[1].plot(calib_tg['vtg'], calib_tg['det'])
if sg_det or sg_rpar:
calib_tg['power'] = det2power(calib_tg['det'])
ax[2].plot(calib_tg['rpar'], calib_tg['power'], '.', markersize=3)
# savgol_filter on power vs rpar or csaps smoothingspline
calib_tg.sort_values(by='rpar', inplace=True)
rpar_reg = np.linspace(calib_tg['rpar'].min(), calib_tg['rpar'].max(), 500)
if not cs_smooth:
power = np.interp(rpar_reg, calib_tg['rpar'], calib_tg['power'])
smoothed_power = savgol_filter(power, *sg_power)
calib = sp.interpolate.interp1d(rpar_reg, smoothed_power, fill_value='extrapolate')
else:
calib = csaps.CubicSmoothingSpline(calib_tg['rpar'], calib_tg['power'],\
smooth=cs_smooth)
calib_data = calib_tg.loc[calib_tg['rpar']>0, :]
ax[2].plot(rpar_reg, calib(rpar_reg))
ax[0].set_xlabel(get_label('Vtg'))
ax[0].set_ylabel(get_label('rpar'))
ax[1].set_xlabel(get_label('Vtg'))
ax[1].set_ylabel(get_label('Vdet'))
ax[2].set_xlabel(get_label('rpar'))
ax[2].set_ylabel(get_label('P'))
return calib_data, calib
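# Illustrative usage sketch, not part of the module: the file indices, bias sweeps and
# data directory below are hypothetical, and the module-level varz settings are assumed
# to provide preamp, r_set, r0 and coef. calibrate() returns the cleaned calibration
# table and an interpolating function power(rpar).
def _calibrate_example():
    vbs = [np.linspace(-1e-3, 1e-3, 101)] * 3   # one bias sweep per resistance file
    calib_data, calib = calibrate(vbs, res_inds=[1, 2, 3], det_inds=[4, 5, 6],
                                  name='./data/cond', r_wires=215/2)
    print(calib(1e3))                           # noise power expected at rpar = 1 kOhm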
def fit_calibration(calib_data, filt, fitrange=[], p0_dict={'C':18e-12, 'f0':22.5e6,\
'G':1.4e7, 'S_amp':5e-27, 'P0':0, 'T':4.2}, fixed=['f0', 'T'], kwargs={},\
names={'r_par':'rpar', 'power':'power', 'f':'f', 'Tr':'Tr'}, plot=True):
if {'C', 'f0', 'G', 'S_amp', 'P0', 'T'} -\
set(p0_dict.keys()) - set(fixed) != set():
raise Exception
if len(fitrange) == 2:
inds = (calib_data[names['r_par']] > fitrange[0]) &\
(calib_data[names['r_par']] < fitrange[1])
else:
inds = calib_data.index
r_par_fit = calib_data.loc[inds, names['r_par']].values
power_fit = calib_data.loc[inds, names['power']].values
r_par = calib_data[names['r_par']].values
power = calib_data[names['power']].values
f = filt[names['f']].values
Tr = filt[names['Tr']].values
x0 = []
param_inds = {}
for k, v in p0_dict.items():
if k not in fixed:
x0.append(v)
param_inds[k] = len(x0) - 1
def _params2dict(params):
p_dict = {k:params[param_inds[k]] for k in param_inds}
p_dict.update({k:p0_dict[k] for k in fixed})
return p_dict
def _G(r_par, p_dict):
r_par = np.atleast_1d(r_par)
p_dict['L'] = 1 / (4* np.pi**2 * p_dict['C'] * p_dict['f0']**2)
shape = (len(r_par), len(f))
mesh = dict()
mesh['r_par'] = np.array([r_par for i in range(shape[1])]).T
mesh['f'] = np.array([f for i in range(shape[0])])
mesh['Tr'] = np.array([Tr for i in range(shape[0])])
with warnings.catch_warnings():
warnings.simplefilter(action='ignore')
mesh['z_lc'] = np.abs(1 / (2j*np.pi*mesh['f']*p_dict['C'] +\
1/(2j*np.pi*mesh['f']*p_dict['L'])))
integral = np.trapz(mesh['Tr']*p_dict['G']/\
(mesh['r_par']**(-2) + mesh['z_lc']**(-2))/50, f, axis=1)
return integral
def _get_power(r_par, p_dict):
r_par = np.atleast_1d(r_par)
return (4*kB*p_dict['T']/r_par +\
p_dict['S_amp'])*_G(r_par, p_dict) + p_dict['P0']
def _power_fitting_min(params, r_par, power):
p_dict = _params2dict(params)
return np.trapz((_get_power(r_par, p_dict) - power)**2, r_par)
with warnings.catch_warnings():
warnings.simplefilter(action='ignore')
result = sp.optimize.minimize(_power_fitting_min, x0=x0, method='Nelder-Mead',\
args=(r_par_fit, power_fit,), **kwargs)
p_dict = _params2dict(result.x)
[print(str(k)+': '+'{:.3g}'.format(v), end='; ') for k,v in p_dict.items()]
print()
if plot:
fig, ax = plt.subplots()
ax.plot(r_par, _get_power(r_par, p0_dict), lw=1.5, label='x0')
ax.plot(r_par, power, 'black', marker='.', ms=4, lw=0, label='data')
ax.plot(r_par, _get_power(r_par, p_dict), lw=1.5, label='Nelder-Mead')
ax.set_xlabel(get_label('rpar'))
ax.set_ylabel(get_label('P'))
ax.legend();
return lambda x: _get_power(x, _params2dict(result.x)),\
lambda x: _G(x, _params2dict(result.x)), p_dict
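# Illustrative usage sketch, not part of the module: `filt` is assumed to be a DataFrame
# with columns 'f' (frequency) and 'Tr' (filter transmission). fit_calibration returns
# a fitted power(r_par) function, the gain integral G(r_par), and the parameter dict.
def _fit_calibration_example(calib_data, filt):
    power_fn, gain_fn, params = fit_calibration(calib_data, filt, fitrange=[100, 10000],
                                                fixed=['f0', 'T'])
    print(params['G'], power_fn(1e3), gain_fn(1e3))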
| avg_line_length: 39.438596 | max_line_length: 96 | alphanum_fraction: 0.560276 |
| hexsha: 9bebbedc7bc2e78818ae1b4594d94751fc4a240a | size: 19680 | ext: py | lang: Python |
| max_stars_repo: anirudhbagri/st2 @ bedc17015c6f0c4d7c00b30684ae6015b55bf702 | path: contrib/runners/winrm_runner/winrm_runner/winrm_base.py | licenses: ["Apache-2.0"] | stars: null |
| max_issues_repo: anirudhbagri/st2 @ bedc17015c6f0c4d7c00b30684ae6015b55bf702 | path: contrib/runners/winrm_runner/winrm_runner/winrm_base.py | licenses: ["Apache-2.0"] | issues: null |
| max_forks_repo: anirudhbagri/st2 @ bedc17015c6f0c4d7c00b30684ae6015b55bf702 | path: contrib/runners/winrm_runner/winrm_runner/winrm_base.py | licenses: ["Apache-2.0"] | forks: null |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import base64
import os
import re
import six
import time
from base64 import b64encode
from contextlib import contextmanager
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.constants import exit_codes as exit_code_constants
from st2common.runners.base import ActionRunner
from st2common.util import jsonify
from winrm import Session, Response
from winrm.exceptions import WinRMOperationTimeoutError
__all__ = [
"WinRmBaseRunner",
]
LOG = logging.getLogger(__name__)
RUNNER_CWD = "cwd"
RUNNER_ENV = "env"
RUNNER_HOST = "host"
RUNNER_KWARG_OP = "kwarg_op"
RUNNER_PASSWORD = "password"
RUNNER_PORT = "port"
RUNNER_SCHEME = "scheme"
RUNNER_TIMEOUT = "timeout"
RUNNER_TRANSPORT = "transport"
RUNNER_USERNAME = "username"
RUNNER_VERIFY_SSL = "verify_ssl_cert"
WINRM_DEFAULT_TMP_DIR_PS = "[System.IO.Path]::GetTempPath()"
# maximum cmdline length for systems >= Windows XP
# https://support.microsoft.com/en-us/help/830473/command-prompt-cmd-exe-command-line-string-limitation
WINRM_MAX_CMD_LENGTH = 8191
WINRM_HTTPS_PORT = 5986
WINRM_HTTP_PORT = 5985
# explicity made so that it does not equal SUCCESS so a failure is returned
WINRM_TIMEOUT_EXIT_CODE = exit_code_constants.SUCCESS_EXIT_CODE - 1
# number of bytes in each chunk when uploading data via WinRM to a target host
# this was chosen arbitrarily and could probably use some tuning
WINRM_UPLOAD_CHUNK_SIZE_BYTES = 2048
DEFAULT_KWARG_OP = "-"
DEFAULT_PORT = WINRM_HTTPS_PORT
DEFAULT_SCHEME = "https"
DEFAULT_TIMEOUT = 60
DEFAULT_TRANSPORT = "ntlm"
DEFAULT_VERIFY_SSL = True
RESULT_KEYS_TO_TRANSFORM = ["stdout", "stderr"]
# key = value in linux/bash to escape
# value = powershell escaped equivalent
#
# Compiled list from the following sources:
# https://ss64.com/ps/syntax-esc.html
# https://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences#PowerShell_Special_Escape_Sequences
PS_ESCAPE_SEQUENCES = {
"\n": "`n",
"\r": "`r",
"\t": "`t",
"\a": "`a",
"\b": "`b",
"\f": "`f",
"\v": "`v",
'"': '`"',
"'": "`'",
"`": "``",
"\0": "`0",
"$": "`$",
}
class WinRmRunnerTimoutError(Exception):
def __init__(self, response):
self.response = response
class WinRmBaseRunner(ActionRunner):
def pre_run(self):
super(WinRmBaseRunner, self).pre_run()
# common connection parameters
self._session = None
self._host = self.runner_parameters[RUNNER_HOST]
self._username = self.runner_parameters[RUNNER_USERNAME]
self._password = self.runner_parameters[RUNNER_PASSWORD]
self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT, DEFAULT_TIMEOUT)
self._read_timeout = (
self._timeout + 1
) # read_timeout must be > operation_timeout
# default to https port 5986 over ntlm
self._port = self.runner_parameters.get(RUNNER_PORT, DEFAULT_PORT)
self._scheme = self.runner_parameters.get(RUNNER_SCHEME, DEFAULT_SCHEME)
self._transport = self.runner_parameters.get(
RUNNER_TRANSPORT, DEFAULT_TRANSPORT
)
# if connecting to the HTTP port then we must use "http" as the scheme
# in the URL
if self._port == WINRM_HTTP_PORT:
self._scheme = "http"
# construct the URL for connecting to WinRM on the host
self._winrm_url = "{}://{}:{}/wsman".format(
self._scheme, self._host, self._port
)
# default to verifying SSL certs
self._verify_ssl = self.runner_parameters.get(
RUNNER_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
self._server_cert_validation = "validate" if self._verify_ssl else "ignore"
# additional parameters
self._cwd = self.runner_parameters.get(RUNNER_CWD, None)
self._env = self.runner_parameters.get(RUNNER_ENV, {})
self._env = self._env or {}
self._kwarg_op = self.runner_parameters.get(RUNNER_KWARG_OP, DEFAULT_KWARG_OP)
def _get_session(self):
# cache session (only create if it doesn't exist yet)
if not self._session:
LOG.debug("Connecting via WinRM to url: {}".format(self._winrm_url))
self._session = Session(
self._winrm_url,
auth=(self._username, self._password),
transport=self._transport,
server_cert_validation=self._server_cert_validation,
operation_timeout_sec=self._timeout,
read_timeout_sec=self._read_timeout,
)
return self._session
def _winrm_get_command_output(self, protocol, shell_id, command_id):
# NOTE: this is copied from pywinrm because it doesn't support
# timeouts
stdout_buffer, stderr_buffer = [], []
return_code = 0
command_done = False
start_time = time.time()
while not command_done:
# check if we need to timeout (StackStorm custom)
current_time = time.time()
elapsed_time = current_time - start_time
if self._timeout and (elapsed_time > self._timeout):
raise WinRmRunnerTimoutError(
Response(
(
b"".join(stdout_buffer),
b"".join(stderr_buffer),
WINRM_TIMEOUT_EXIT_CODE,
)
)
)
# end stackstorm custom
try:
(
stdout,
stderr,
return_code,
command_done,
) = protocol._raw_get_command_output(shell_id, command_id)
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
except WinRMOperationTimeoutError:
# this is an expected error when waiting for a long-running process,
# just silently retry
pass
return b"".join(stdout_buffer), b"".join(stderr_buffer), return_code
def _winrm_run_cmd(self, session, command, args=(), env=None, cwd=None):
# NOTE: this is copied from pywinrm because it doesn't support
# passing env and working_directory from the Session.run_cmd.
# It also doesn't support timeouts. All of these things have been
# added
shell_id = session.protocol.open_shell(env_vars=env, working_directory=cwd)
command_id = session.protocol.run_command(shell_id, command, args)
# try/catch is for custom timeout handing (StackStorm custom)
try:
rs = Response(
self._winrm_get_command_output(session.protocol, shell_id, command_id)
)
rs.timeout = False
except WinRmRunnerTimoutError as e:
rs = e.response
rs.timeout = True
# end stackstorm custom
session.protocol.cleanup_command(shell_id, command_id)
session.protocol.close_shell(shell_id)
return rs
def _winrm_encode(self, script):
return b64encode(script.encode("utf_16_le")).decode("ascii")
def _winrm_ps_cmd(self, encoded_ps):
return "powershell -encodedcommand {0}".format(encoded_ps)
def _winrm_run_ps(self, session, script, env=None, cwd=None, is_b64=False):
# NOTE: this is copied from pywinrm because it doesn't support
# passing env and working_directory from the Session.run_ps
# encode the script in UTF only if it isn't passed in encoded
LOG.debug("_winrm_run_ps() - script size = {}".format(len(script)))
encoded_ps = script if is_b64 else self._winrm_encode(script)
ps_cmd = self._winrm_ps_cmd(encoded_ps)
LOG.debug("_winrm_run_ps() - ps cmd size = {}".format(len(ps_cmd)))
rs = self._winrm_run_cmd(session, ps_cmd, env=env, cwd=cwd)
if len(rs.std_err):
            # if there was an error message, clean it up and make it human
# readable
if isinstance(rs.std_err, bytes):
# decode bytes into utf-8 because of a bug in pywinrm
# real fix is here: https://github.com/diyan/pywinrm/pull/222/files
rs.std_err = rs.std_err.decode("utf-8")
rs.std_err = session._clean_error_msg(rs.std_err)
return rs
def _translate_response(self, response):
# check exit status for errors
succeeded = response.status_code == exit_code_constants.SUCCESS_EXIT_CODE
status = action_constants.LIVEACTION_STATUS_SUCCEEDED
status_code = response.status_code
if response.timeout:
status = action_constants.LIVEACTION_STATUS_TIMED_OUT
status_code = WINRM_TIMEOUT_EXIT_CODE
elif not succeeded:
status = action_constants.LIVEACTION_STATUS_FAILED
# create result
result = {
"failed": not succeeded,
"succeeded": succeeded,
"return_code": status_code,
"stdout": response.std_out,
"stderr": response.std_err,
}
# Ensure stdout and stderr is always a string
if isinstance(result["stdout"], six.binary_type):
result["stdout"] = result["stdout"].decode("utf-8")
if isinstance(result["stderr"], six.binary_type):
result["stderr"] = result["stderr"].decode("utf-8")
# automatically convert result stdout/stderr from JSON strings to
# objects so they can be used natively
return (status, jsonify.json_loads(result, RESULT_KEYS_TO_TRANSFORM), None)
def _make_tmp_dir(self, parent):
LOG.debug(
"Creating temporary directory for WinRM script in parent: {}".format(parent)
)
ps = """$parent = {parent}
$name = [System.IO.Path]::GetRandomFileName()
$path = Join-Path $parent $name
New-Item -ItemType Directory -Path $path | Out-Null
$path""".format(
parent=parent
)
result = self._run_ps_or_raise(
ps, ("Unable to make temporary directory for" " powershell script")
)
# strip to remove trailing newline and whitespace (if any)
return result["stdout"].strip()
def _rm_dir(self, directory):
ps = 'Remove-Item -Force -Recurse -Path "{}"'.format(directory)
self._run_ps_or_raise(
ps, "Unable to remove temporary directory for powershell script"
)
def _upload(self, src_path_or_data, dst_path):
src_data = None
# detect if this is a path or a string containing data
# if this is a path, then read the data from the path
if os.path.exists(src_path_or_data):
LOG.debug("WinRM uploading local file: {}".format(src_path_or_data))
with open(src_path_or_data, "r") as src_file:
src_data = src_file.read()
else:
LOG.debug("WinRM uploading data from a string")
src_data = src_path_or_data
# upload the data in chunks such that each chunk doesn't exceed the
# max command size of the windows command line
for i in range(0, len(src_data), WINRM_UPLOAD_CHUNK_SIZE_BYTES):
LOG.debug(
"WinRM uploading data bytes: {}-{}".format(
i, (i + WINRM_UPLOAD_CHUNK_SIZE_BYTES)
)
)
self._upload_chunk(
dst_path, src_data[i : (i + WINRM_UPLOAD_CHUNK_SIZE_BYTES)]
)
def _upload_chunk(self, dst_path, src_data):
# adapted from https://github.com/diyan/pywinrm/issues/18
if not isinstance(src_data, six.binary_type):
src_data = src_data.encode("utf-8")
ps = """$filePath = "{dst_path}"
$s = @"
{b64_data}
"@
$data = [System.Convert]::FromBase64String($s)
Add-Content -value $data -encoding byte -path $filePath
""".format(
dst_path=dst_path, b64_data=base64.b64encode(src_data).decode("utf-8")
)
LOG.debug("WinRM uploading chunk, size = {}".format(len(ps)))
self._run_ps_or_raise(ps, "Failed to upload chunk of powershell script")
@contextmanager
def _tmp_script(self, parent, script):
tmp_dir = None
try:
LOG.info("WinRM Script - Making temporary directory")
tmp_dir = self._make_tmp_dir(parent)
LOG.debug("WinRM Script - Tmp directory created: {}".format(tmp_dir))
LOG.info("WinRM Script = Upload starting")
tmp_script = tmp_dir + "\\script.ps1"
LOG.debug("WinRM Uploading script to: {}".format(tmp_script))
self._upload(script, tmp_script)
LOG.info("WinRM Script - Upload complete")
yield tmp_script
finally:
if tmp_dir:
LOG.debug("WinRM Script - Removing script: {}".format(tmp_dir))
self._rm_dir(tmp_dir)
def run_cmd(self, cmd):
# connect
session = self._get_session()
# execute
response = self._winrm_run_cmd(session, cmd, env=self._env, cwd=self._cwd)
# create triplet from WinRM response
return self._translate_response(response)
def run_ps(self, script, params=None):
# temporary directory for the powershell script
if params:
powershell = "& {%s} %s" % (script, params)
else:
powershell = script
encoded_ps = self._winrm_encode(powershell)
ps_cmd = self._winrm_ps_cmd(encoded_ps)
# if the powershell script is small enough to fit in one command
# then run it as a single command (faster)
# else we need to upload the script to a temporary file and execute it,
# then remove the temporary file
        if len(ps_cmd) <= WINRM_MAX_CMD_LENGTH:
            return self._run_ps(encoded_ps, is_b64=True)
        else:
            # the command is too long for a single invocation; upload and run it as a script
            LOG.info(
                (
                    "WinRM powershell command size {} is > {}, the max size of a"
                    " powershell command. Converting to a script execution."
                ).format(len(ps_cmd), WINRM_MAX_CMD_LENGTH)
            )
            return self._run_ps_script(script, params)
def _run_ps(self, powershell, is_b64=False):
"""Executes a powershell command, no checks for length are done in this version.
        The lack of checks here is intentional so that we don't run into an infinite loop
when converting a long command to a script"""
# connect
session = self._get_session()
# execute
response = self._winrm_run_ps(
session, powershell, env=self._env, cwd=self._cwd, is_b64=is_b64
)
# create triplet from WinRM response
return self._translate_response(response)
def _run_ps_script(self, script, params=None):
tmp_dir = WINRM_DEFAULT_TMP_DIR_PS
# creates a temporary file,
# upload the contents of 'script' to the temporary file
# handle deletion of the temporary file on exit of the with block
with self._tmp_script(tmp_dir, script) as tmp_script:
# the following wraps the script (from the file) in a script block ( {} )
# executes it, passing in the parameters built above
# https://docs.microsoft.com/en-us/powershell/scripting/core-powershell/console/powershell.exe-command-line-help
ps = "& {%s}" % (tmp_script)
if params:
ps += " " + params
return self._run_ps(ps)
def _run_ps_or_raise(self, ps, error_msg):
response = self._run_ps(ps)
# response is a tuple: (status, result, None)
result = response[1]
if result["failed"]:
raise RuntimeError(
("{}:\n" "stdout = {}\n\n" "stderr = {}").format(
error_msg, result["stdout"], result["stderr"]
)
)
return result
def _multireplace(self, string, replacements):
"""
Given a string and a replacement map, it returns the replaced string.
Source = https://gist.github.com/bgusach/a967e0587d6e01e889fd1d776c5f3729
Reference = https://stackoverflow.com/questions/6116978/how-to-replace-multiple-substrings-of-a-string # noqa
:param str string: string to execute replacements on
:param dict replacements: replacement dictionary {value to find: value to replace}
:rtype: str
"""
# Place longer ones first to keep shorter substrings from matching where
# the longer ones should take place
# For instance given the replacements {'ab': 'AB', 'abc': 'ABC'} against
# the string 'hey abc', it should produce 'hey ABC' and not 'hey ABc'
substrs = sorted(replacements, key=len, reverse=True)
# Create a big OR regex that matches any of the substrings to replace
regexp = re.compile("|".join([re.escape(s) for s in substrs]))
# For each match, look up the new string in the replacements
return regexp.sub(lambda match: replacements[match.group(0)], string)
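    # Illustrative sketch, not part of the original runner: _multireplace applied with
    # PS_ESCAPE_SEQUENCES escapes the characters that are special inside a PowerShell
    # double-quoted string.
    def _multireplace_example(self):
        raw = 'say "hi"\nand $pay `up`'
        escaped = self._multireplace(raw, PS_ESCAPE_SEQUENCES)
        # escaped == 'say `"hi`"`nand `$pay ``up``'
        return escaped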
def _param_to_ps(self, param):
ps_str = ""
if param is None:
ps_str = "$null"
elif isinstance(param, six.string_types):
ps_str = '"' + self._multireplace(param, PS_ESCAPE_SEQUENCES) + '"'
elif isinstance(param, bool):
ps_str = "$true" if param else "$false"
elif isinstance(param, list):
ps_str = "@("
ps_str += ", ".join([self._param_to_ps(p) for p in param])
ps_str += ")"
elif isinstance(param, dict):
ps_str = "@{"
ps_str += "; ".join(
[
(self._param_to_ps(k) + " = " + self._param_to_ps(v))
for k, v in six.iteritems(param)
]
)
ps_str += "}"
else:
ps_str = str(param)
return ps_str
def _transform_params_to_ps(self, positional_args, named_args):
if positional_args:
for i, arg in enumerate(positional_args):
positional_args[i] = self._param_to_ps(arg)
if named_args:
for key, value in six.iteritems(named_args):
named_args[key] = self._param_to_ps(value)
return positional_args, named_args
def create_ps_params_string(self, positional_args, named_args):
# convert the script parameters into powershell strings
positional_args, named_args = self._transform_params_to_ps(
positional_args, named_args
)
# concatenate them into a long string
ps_params_str = ""
if named_args:
ps_params_str += " ".join(
[(k + " " + v) for k, v in six.iteritems(named_args)]
)
ps_params_str += " "
if positional_args:
ps_params_str += " ".join(positional_args)
return ps_params_str
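    # Illustrative sketch, not part of the original runner: create_ps_params_string
    # renders named arguments (here assumed to already carry the kwarg_op prefix)
    # followed by positional arguments, each converted with _param_to_ps.
    def _create_ps_params_string_example(self):
        named = {"-Name": "web01", "-Force": True}
        positional = ["C:\\temp"]
        # e.g. returns '-Name "web01" -Force $true "C:\\temp"'
        return self.create_ps_params_string(positional, named)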
| avg_line_length: 39.438878 | max_line_length: 133 | alphanum_fraction: 0.625457 |
| hexsha: 33fe9e9be53a7fa23a4f7e9dfde20304cfd01402 | size: 443 | ext: py | lang: Python |
| max_stars_repo: CodedLadiesInnovateTech/-python-challenge-solutions @ 430cd3eb84a2905a286819eef384ee484d8eb9e7 | path: Ekeopara_Praise/Phase 2/LIST/Day39 Tasks/Task5.py | licenses: ["MIT"] | stars: 6 (2020-05-23T19:53:25.000Z to 2021-05-08T20:21:30.000Z) |
| max_issues_repo: CodedLadiesInnovateTech/-python-challenge-solutions @ 430cd3eb84a2905a286819eef384ee484d8eb9e7 | path: Ekeopara_Praise/Phase 2/LIST/Day39 Tasks/Task5.py | licenses: ["MIT"] | issues: 8 (2020-05-14T18:53:12.000Z to 2020-07-03T00:06:20.000Z) |
| max_forks_repo: CodedLadiesInnovateTech/-python-challenge-solutions @ 430cd3eb84a2905a286819eef384ee484d8eb9e7 | path: Ekeopara_Praise/Phase 2/LIST/Day39 Tasks/Task5.py | licenses: ["MIT"] | forks: 39 (2020-05-10T20:55:02.000Z to 2020-09-12T17:40:59.000Z) |
'''5. Write a Python program to count the number of strings, from a given list of strings, where the string length
is 2 or more and the first and last characters are the same.
Sample List : ['abc', 'xyz', 'aba', '1221']
Expected Result : 2'''
def match_words(words):
ctr = 0
for word in words:
if len(word) > 1 and word[0] == word[-1]:
ctr += 1
return ctr
print(match_words(['abc', 'xyz', 'aba', '1221']))
#Reference: w3resource
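# Illustrative alternative, not from the original solution: the same count written as a
# generator expression.
def match_words_compact(words):
    return sum(1 for w in words if len(w) > 1 and w[0] == w[-1])
# match_words_compact(['abc', 'xyz', 'aba', '1221']) -> 2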
| avg_line_length: 27.6875 | max_line_length: 111 | alphanum_fraction: 0.654628 |
| hexsha: eec89efefb45f4e5ab988873cca7dc5426dfb502 | size: 373 | ext: py | lang: Python |
| max_stars_repo: GerasimovRM/Where-I-Am @ 58f6f0d1533421890f199dacabe523a447486b9f | path: srcipts/requests/admins_get.py | licenses: ["MIT"] | stars: null |
| max_issues_repo: GerasimovRM/Where-I-Am @ 58f6f0d1533421890f199dacabe523a447486b9f | path: srcipts/requests/admins_get.py | licenses: ["MIT"] | issues: null |
| max_forks_repo: GerasimovRM/Where-I-Am @ 58f6f0d1533421890f199dacabe523a447486b9f | path: srcipts/requests/admins_get.py | licenses: ["MIT"] | forks: null |
from requests import get, post
from pprint import pprint
from srcipts.requests.common import URL
tokens = post(f'{URL}/signin', json={'nickname': 'Roman',
'unhashed_password': 'сильныйпароль'}).json()
pprint(tokens)
headers = {'Authorization': f'Bearer {tokens["access_token"]}'}
pprint(get(f'{URL}/admins', headers=headers).json())
| avg_line_length: 31.083333 | max_line_length: 82 | alphanum_fraction: 0.659517 |
| hexsha: ff6852823c830cb630f713afb2f571f83c3b84e5 | size: 3951 | ext: py | lang: Python |
| max_stars_repo: dbatten5/dagster @ d76e50295054ffe5a72f9b292ef57febae499528 | path: python_modules/dagster/dagster/grpc/compile.py | licenses: ["Apache-2.0"] | stars: 4606 (2018-06-21T17:45:20.000Z to 2022-03-31T23:39:42.000Z) |
| max_issues_repo: dbatten5/dagster @ d76e50295054ffe5a72f9b292ef57febae499528 | path: python_modules/dagster/dagster/grpc/compile.py | licenses: ["Apache-2.0"] | issues: 6221 (2018-06-12T04:36:01.000Z to 2022-03-31T21:43:05.000Z) |
| max_forks_repo: dbatten5/dagster @ d76e50295054ffe5a72f9b292ef57febae499528 | path: python_modules/dagster/dagster/grpc/compile.py | licenses: ["Apache-2.0"] | forks: 619 (2018-08-22T22:43:09.000Z to 2022-03-31T22:48:06.000Z) |
"""Compile the proto definitions into Python.
This tooling should be invoked to regenerate the Python grpc artifacts by running:
python -m dagster.grpc.compile
"""
import os
import shutil
import subprocess
import sys
from dagster.utils import file_relative_path, safe_tempfile_path
PROTOS_DIR = file_relative_path(__file__, "protos")
PROTOS_PATH = os.path.join(PROTOS_DIR, "api.proto")
GENERATED_DIR = file_relative_path(__file__, "__generated__")
GENERATED_PB2_PATH = os.path.join(GENERATED_DIR, "api_pb2.py")
GENERATED_GRPC_PATH = os.path.join(GENERATED_DIR, "api_pb2_grpc.py")
ISORT_SETTINGS_PATH = file_relative_path(__file__, "../../../../")
GENERATED_HEADER = [
("# @" + "generated\n"), # This is to avoid matching the phab rule
"\n",
"# This file was generated by running `python -m dagster.grpc.compile`\n",
"# Do not edit this file directly, and do not attempt to recompile it using\n",
"# grpc_tools.protoc directly, as several changes must be made to the raw output\n",
"\n",
]
GENERATED_GRPC_PYLINT_DIRECTIVE = [
"# pylint: disable=no-member, unused-argument\n",
"\n",
]
GENERATED_PB2_PYLINT_DIRECTIVE = [
"# pylint: disable=protected-access,no-name-in-module\n",
"\n",
]
def protoc():
# python -m grpc_tools.protoc \
# -I protos --python_out __generated__ --grpc_python_out __generated__ protos/api.proto
_res = subprocess.check_output(
[
sys.executable,
"-m",
"grpc_tools.protoc",
"-I",
PROTOS_DIR,
"--python_out",
GENERATED_DIR,
"--grpc_python_out",
GENERATED_DIR,
PROTOS_PATH,
]
)
# The generated api_pb2_grpc.py file must be altered in two ways:
# 1. Add a pylint directive, `disable=no-member, unused-argument`
# 2. Change the import from `import api_pb2 as api__pb2` to `from . import api_pb2 as api__pb2`.
# See: https://github.com/grpc/grpc/issues/22914
with safe_tempfile_path() as tempfile_path:
shutil.copyfile(
GENERATED_GRPC_PATH,
tempfile_path,
)
with open(tempfile_path, "r") as generated:
with open(GENERATED_GRPC_PATH, "w") as rewritten:
for line in GENERATED_HEADER:
rewritten.write(line)
for line in GENERATED_GRPC_PYLINT_DIRECTIVE:
rewritten.write(line)
for line in generated.readlines():
if line == "import api_pb2 as api__pb2\n":
rewritten.write("from . import api_pb2 as api__pb2\n")
else:
rewritten.write(line)
with safe_tempfile_path() as tempfile_path:
shutil.copyfile(
GENERATED_PB2_PATH,
tempfile_path,
)
with open(tempfile_path, "r") as generated:
with open(GENERATED_PB2_PATH, "w") as rewritten:
for line in GENERATED_HEADER:
rewritten.write(line)
for line in GENERATED_PB2_PYLINT_DIRECTIVE:
rewritten.write(line)
for line in generated.readlines():
rewritten.write(line)
# We need to run black
_res = subprocess.check_output(
[
sys.executable,
"-m",
"black",
"-l",
"100",
"-t",
"py35",
"-t",
"py36",
"-t",
"py37",
"-t",
"py38",
GENERATED_DIR,
]
)
# And, finally, we need to run isort
_res = subprocess.check_output(
[
"isort",
"--settings-path",
ISORT_SETTINGS_PATH,
"-y",
GENERATED_PB2_PATH,
GENERATED_GRPC_PATH,
]
)
if __name__ == "__main__":
protoc()
| avg_line_length: 28.630435 | max_line_length: 100 | alphanum_fraction: 0.572766 |
| hexsha: aaf69b8dd0c1074846a14b8afb7d19deb478560a | size: 51629 | ext: py | lang: Python |
| max_stars_repo: fabioo29/physio-motion-transfer @ b3bbbf98d6af0d79de8a29aa638b5c392a55d6f4 | path: src/pmt/thirdparty/cihp_pgn/infer.py | licenses: ["MIT"] | stars: 2 (2021-06-30T23:13:36.000Z to 2022-01-05T19:42:52.000Z) |
| max_issues_repo: fabioo29/physio-motion-transfer @ b3bbbf98d6af0d79de8a29aa638b5c392a55d6f4 | path: src/pmt/thirdparty/cihp_pgn/infer.py | licenses: ["MIT"] | issues: null |
| max_forks_repo: fabioo29/physio-motion-transfer @ b3bbbf98d6af0d79de8a29aa638b5c392a55d6f4 | path: src/pmt/thirdparty/cihp_pgn/infer.py | licenses: ["MIT"] | forks: null |
import os
import tensorflow as tf
import numpy as np
from glob import glob
from PIL import Image
slim = tf.contrib.slim
N_CLASSES = 20
INPUT_RESIZE = 1080
label_colours = [(0,0,0)
, (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
, (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
DEFAULT_PADDING = 'SAME'
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
ckpt = tf.train.get_checkpoint_state(ckpt_path)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join(ckpt_path, ckpt_name))
#print("Restored model parameters from {}".format(ckpt_name))
return True
else:
return False
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
def decode_labels(mask, num_images=1, num_classes=21):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_, j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
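# Illustrative sketch, not part of the original script: decode_labels turns an argmax
# mask of shape (N, H, W, 1) into RGB images using label_colours. The class index 2 is
# chosen arbitrarily for the example.
def _decode_labels_example():
    mask = np.zeros((1, 4, 4, 1), dtype=np.uint8)
    mask[0, :2, :2, 0] = 2
    rgb = decode_labels(mask, num_images=1, num_classes=N_CLASSES)
    print(rgb.shape)  # (1, 4, 4, 3)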
class Network(object):
def __init__(self, inputs, trainable=True, is_training=False, n_classes=20, keep_prob=1):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
# Switch variable for dropout
self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
shape=[],
name='use_dropout')
self.setup(is_training, n_classes, keep_prob)
def setup(self, is_training, n_classes, keep_prob):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].items():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def make_w_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
stddev = 0.01
return tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=stddev), trainable=self.trainable)
def make_b_var(self, name, shape):
return tf.get_variable(name, shape, initializer=tf.constant_initializer(0.0), trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
input,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_w_var('weights', shape=[k_h, k_w, c_i // group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input, kernel)
else:
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
# Add the biases
if biased:
biases = self.make_b_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def atrous_conv(self,
input,
k_h,
k_w,
c_o,
dilation,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.atrous_conv2d(i, k, dilation, padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_w_var('weights', shape=[k_h, k_w, c_i // group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input, kernel)
else:
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
# Add the biases
if biased:
biases = self.make_b_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def lrn(self, input, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(values=inputs, axis=axis, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def fc(self, input, num_out, name, relu=True):
with tf.variable_scope(name) as scope:
input_shape = input.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input, [-1, dim])
else:
feed_in, dim = (input, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def softmax(self, input, name):
        input_shape = list(map(lambda v: v.value, input.get_shape()))
if len(input_shape) > 2:
# For certain models (like NiN), the singleton spatial dimensions
# need to be explicitly squeezed, since they're not broadcast-able
# in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
if input_shape[1] == 1 and input_shape[2] == 1:
input = tf.squeeze(input, squeeze_dims=[1, 2])
else:
raise ValueError('Rank 2 tensor input expected for softmax!')
return tf.nn.softmax(input, name)
@layer
def batch_normalization(self, input, name, is_training, activation_fn=None, scale=True):
with tf.variable_scope(name) as scope:
output = slim.batch_norm(
input,
activation_fn=activation_fn,
is_training=is_training,
updates_collections=None,
scale=scale,
scope=scope)
return output
@layer
def dropout(self, input, keep_prob, name):
keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
return tf.nn.dropout(input, keep, name=name)
@layer
def upsample(self, input, size_h, size_w, name):
with tf.variable_scope(name) as scope:
return tf.image.resize_images(input, size=[size_h, size_w])
@layer
def pyramid_pooling(self, input, o_c, pool_size, name):
with tf.variable_scope(name) as scope:
dims = tf.shape(input)
out_height, out_width = dims[1], dims[2]
pool_ly = tf.nn.avg_pool(input, ksize=[1, pool_size, pool_size, 1], strides=[1, pool_size, pool_size, 1],
padding=DEFAULT_PADDING, name='pool_ly')
weight = self.make_w_var('weights', shape=[3, 3, pool_ly.get_shape()[-1], o_c])
biases = self.make_var('biases', o_c)
conv_ly = tf.nn.conv2d(pool_ly, weight, strides=[1, 1, 1, 1], padding='SAME', name='conv_ly')
conv_ly = tf.nn.bias_add(conv_ly, biases)
conv_ly = tf.nn.relu(conv_ly, name='relu_ly')
output = tf.image.resize_bilinear(conv_ly, [out_height, out_width])
return output
class PGNModel(Network):
def setup(self, is_training, n_classes, keep_prob):
'''Network definition.
Args:
is_training: whether to update the running mean and variance of the batch normalisation layer.
If the batch size is small, it is better to keep the running mean and variance of
the-pretrained model frozen.
'''
(self.feed('data')
.conv(7, 7, 64, 2, 2, biased=False, relu=False, name='conv1')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn_conv1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch1'))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2a_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2a_branch2c'))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2b_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2b_branch2c'))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2a')
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn2c_branch2b')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn2c_branch2c'))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch1'))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3a_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3a_branch2c'))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b1_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b1_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b1_branch2c'))
(self.feed('res3a_relu',
'bn3b1_branch2c')
.add(name='res3b1')
.relu(name='res3b1_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b2_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b2_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b2_branch2c'))
(self.feed('res3b1_relu',
'bn3b2_branch2c')
.add(name='res3b2')
.relu(name='res3b2_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2a')
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn3b3_branch2b')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b3_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn3b3_branch2c'))
(self.feed('res3b2_relu',
'bn3b3_branch2c')
.add(name='res3b3')
.relu(name='res3b3_relu')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch1'))
(self.feed('res3b3_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4a_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4a_branch2c'))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b1_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b1_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b1_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b1_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b1_branch2c'))
(self.feed('res4a_relu',
'bn4b1_branch2c')
.add(name='res4b1')
.relu(name='res4b1_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b2_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b2_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b2_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b2_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b2_branch2c'))
(self.feed('res4b1_relu',
'bn4b2_branch2c')
.add(name='res4b2')
.relu(name='res4b2_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b3_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b3_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b3_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b3_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b3_branch2c'))
(self.feed('res4b2_relu',
'bn4b3_branch2c')
.add(name='res4b3')
.relu(name='res4b3_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b4_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b4_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b4_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b4_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b4_branch2c'))
(self.feed('res4b3_relu',
'bn4b4_branch2c')
.add(name='res4b4')
.relu(name='res4b4_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b5_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b5_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b5_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b5_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b5_branch2c'))
(self.feed('res4b4_relu',
'bn4b5_branch2c')
.add(name='res4b5')
.relu(name='res4b5_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b6_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b6_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b6_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b6_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b6_branch2c'))
(self.feed('res4b5_relu',
'bn4b6_branch2c')
.add(name='res4b6')
.relu(name='res4b6_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b7_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b7_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b7_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b7_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b7_branch2c'))
(self.feed('res4b6_relu',
'bn4b7_branch2c')
.add(name='res4b7')
.relu(name='res4b7_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b8_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b8_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b8_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b8_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b8_branch2c'))
(self.feed('res4b7_relu',
'bn4b8_branch2c')
.add(name='res4b8')
.relu(name='res4b8_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b9_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b9_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b9_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b9_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b9_branch2c'))
(self.feed('res4b8_relu',
'bn4b9_branch2c')
.add(name='res4b9')
.relu(name='res4b9_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b10_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b10_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b10_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b10_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b10_branch2c'))
(self.feed('res4b9_relu',
'bn4b10_branch2c')
.add(name='res4b10')
.relu(name='res4b10_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b11_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b11_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b11_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b11_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b11_branch2c'))
(self.feed('res4b10_relu',
'bn4b11_branch2c')
.add(name='res4b11')
.relu(name='res4b11_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b12_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b12_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b12_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b12_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b12_branch2c'))
(self.feed('res4b11_relu',
'bn4b12_branch2c')
.add(name='res4b12')
.relu(name='res4b12_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b13_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b13_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b13_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b13_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b13_branch2c'))
(self.feed('res4b12_relu',
'bn4b13_branch2c')
.add(name='res4b13')
.relu(name='res4b13_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b14_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b14_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b14_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b14_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b14_branch2c'))
(self.feed('res4b13_relu',
'bn4b14_branch2c')
.add(name='res4b14')
.relu(name='res4b14_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b15_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b15_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b15_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b15_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b15_branch2c'))
(self.feed('res4b14_relu',
'bn4b15_branch2c')
.add(name='res4b15')
.relu(name='res4b15_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b16_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b16_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b16_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b16_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b16_branch2c'))
(self.feed('res4b15_relu',
'bn4b16_branch2c')
.add(name='res4b16')
.relu(name='res4b16_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b17_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b17_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b17_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b17_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b17_branch2c'))
(self.feed('res4b16_relu',
'bn4b17_branch2c')
.add(name='res4b17')
.relu(name='res4b17_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b18_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b18_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b18_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b18_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b18_branch2c'))
(self.feed('res4b17_relu',
'bn4b18_branch2c')
.add(name='res4b18')
.relu(name='res4b18_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b19_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b19_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b19_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b19_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b19_branch2c'))
(self.feed('res4b18_relu',
'bn4b19_branch2c')
.add(name='res4b19')
.relu(name='res4b19_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b20_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b20_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b20_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b20_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b20_branch2c'))
(self.feed('res4b19_relu',
'bn4b20_branch2c')
.add(name='res4b20')
.relu(name='res4b20_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b21_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b21_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b21_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b21_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b21_branch2c'))
(self.feed('res4b20_relu',
'bn4b21_branch2c')
.add(name='res4b21')
.relu(name='res4b21_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b22_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2a')
.atrous_conv(3, 3, 256, 2, padding='SAME', biased=False, relu=False, name='res4b22_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn4b22_branch2b')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b22_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn4b22_branch2c'))
(self.feed('res4b21_relu',
'bn4b22_branch2c')
.add(name='res4b22')
.relu(name='res4b22_relu'))
        # Parsing networks: finish the res5 stage with atrous convolutions, then
        # fuse res3/res4/res5 features and apply pyramid pooling at four scales
        # to predict the n_classes parsing map (parsing_fc).
(self.feed('res4b22_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch1'))
(self.feed('res4b22_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5a_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5a_branch2b')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5a_branch2c'))
(self.feed('bn5a_branch1',
'bn5a_branch2c')
.add(name='res5a')
.relu(name='res5a_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5b_branch2b')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5b_branch2b')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5b_branch2c'))
(self.feed('res5a_relu',
'bn5b_branch2c')
.add(name='res5b')
.relu(name='res5b_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
.batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2a')
.atrous_conv(3, 3, 512, 4, padding='SAME', biased=False, relu=False, name='res5c_branch2b')
             .batch_normalization(is_training=is_training, activation_fn=tf.nn.relu, name='bn5c_branch2b')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
.batch_normalization(is_training=is_training, activation_fn=None, name='bn5c_branch2c'))
(self.feed('res5b_relu',
'bn5c_branch2c')
.add(name='res5c')
.relu(name='res5c_relu'))
(self.feed('res3b3_relu')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_branch1'))
(self.feed('res4b22_relu')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_branch2'))
(self.feed('res5c_relu')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_branch3'))
(self.feed('parsing_branch1', 'parsing_branch2', 'parsing_branch3')
.concat(axis=3, name='parsing_branch_concat')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_branch'))
(self.feed('parsing_branch')
.pyramid_pooling(512, 60, name='parsing_pp1'))
(self.feed('parsing_branch')
.pyramid_pooling(512, 30, name='parsing_pp2'))
(self.feed('parsing_branch')
.pyramid_pooling(512, 20, name='parsing_pp3'))
(self.feed('parsing_branch')
.pyramid_pooling(512, 10, name='parsing_pp4'))
(self.feed('parsing_branch', 'parsing_pp1', 'parsing_pp2', 'parsing_pp3', 'parsing_pp4')
.concat(axis=3, name='parsing_pp_out')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_pp_conv')
.dropout(keep_prob, name='parsing_pp_dropout')
.conv(3, 3, n_classes, 1, 1, biased=True, relu=False, name='parsing_fc'))
        # Edge networks: the same fuse-and-pool structure as the parsing branch,
        # predicting a one-channel edge map (edge_fc).
(self.feed('res3b3_relu')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='edge_branch1'))
(self.feed('res4b22_relu')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='edge_branch2'))
(self.feed('res5c_relu')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='edge_branch3'))
(self.feed('edge_branch1', 'edge_branch2', 'edge_branch3')
.concat(axis=3, name='edge_branch_concat')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='edge_branch'))
(self.feed('edge_branch')
.pyramid_pooling(512, 60, name='edge_pp1'))
(self.feed('edge_branch')
.pyramid_pooling(512, 30, name='edge_pp2'))
(self.feed('edge_branch')
.pyramid_pooling(512, 20, name='edge_pp3'))
(self.feed('edge_branch')
.pyramid_pooling(512, 10, name='edge_pp4'))
(self.feed('edge_branch', 'edge_pp1', 'edge_pp2', 'edge_pp3', 'edge_pp4')
.concat(axis=3, name='edge_pp_out')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='edge_pp_conv')
.dropout(keep_prob, name='edge_pp_dropout')
.conv(3, 3, 1, 1, 1, biased=True, relu=False, name='edge_fc'))
        # Intermediate supervision: one plain and four atrous convolutions
        # (rates 2/4/8/16) per edge branch, summed into fc1_edge_res3/res4/res5.
(self.feed('edge_branch3')
.conv(3, 3, 1, 1, 1, biased=True, relu=False, name='fc1_edge_c0_res5'))
(self.feed('edge_branch3')
.atrous_conv(3, 3, 1, 2, padding='SAME', relu=False, name='fc1_edge_c1_res5'))
(self.feed('edge_branch3')
.atrous_conv(3, 3, 1, 4, padding='SAME', relu=False, name='fc1_edge_c2_res5'))
(self.feed('edge_branch3')
.atrous_conv(3, 3, 1, 8, padding='SAME', relu=False, name='fc1_edge_c3_res5'))
(self.feed('edge_branch3')
.atrous_conv(3, 3, 1, 16, padding='SAME', relu=False, name='fc1_edge_c4_res5'))
(self.feed('fc1_edge_c0_res5', 'fc1_edge_c1_res5', 'fc1_edge_c2_res5', 'fc1_edge_c3_res5', 'fc1_edge_c4_res5')
.add(name='fc1_edge_res5'))
(self.feed('edge_branch2')
.conv(3, 3, 1, 1, 1, biased=True, relu=False, name='fc1_edge_c0_res4'))
(self.feed('edge_branch2')
.atrous_conv(3, 3, 1, 2, padding='SAME', relu=False, name='fc1_edge_c1_res4'))
(self.feed('edge_branch2')
.atrous_conv(3, 3, 1, 4, padding='SAME', relu=False, name='fc1_edge_c2_res4'))
(self.feed('edge_branch2')
.atrous_conv(3, 3, 1, 8, padding='SAME', relu=False, name='fc1_edge_c3_res4'))
(self.feed('edge_branch2')
.atrous_conv(3, 3, 1, 16, padding='SAME', relu=False, name='fc1_edge_c4_res4'))
(self.feed('fc1_edge_c0_res4', 'fc1_edge_c1_res4', 'fc1_edge_c2_res4', 'fc1_edge_c3_res4', 'fc1_edge_c4_res4')
.add(name='fc1_edge_res4'))
(self.feed('edge_branch1')
.conv(3, 3, 1, 1, 1, biased=True, relu=False, name='fc1_edge_c0_res3'))
(self.feed('edge_branch1')
.atrous_conv(3, 3, 1, 2, padding='SAME', relu=False, name='fc1_edge_c1_res3'))
(self.feed('edge_branch1')
.atrous_conv(3, 3, 1, 4, padding='SAME', relu=False, name='fc1_edge_c2_res3'))
(self.feed('edge_branch1')
.atrous_conv(3, 3, 1, 8, padding='SAME', relu=False, name='fc1_edge_c3_res3'))
(self.feed('edge_branch1')
.atrous_conv(3, 3, 1, 16, padding='SAME', relu=False, name='fc1_edge_c4_res3'))
(self.feed('fc1_edge_c0_res3', 'fc1_edge_c1_res3', 'fc1_edge_c2_res3', 'fc1_edge_c3_res3', 'fc1_edge_c4_res3')
.add(name='fc1_edge_res3'))
        # Refine networks: parsing and edge features/predictions are cross-concatenated
        # and pooled again to produce the refined outputs parsing_rf_fc and edge_rf_fc.
(self.feed('parsing_pp_conv')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='parsing_fea'))
(self.feed('parsing_fc')
.conv(1, 1, 128, 1, 1, biased=True, relu=True, name='parsing_remap'))
(self.feed('edge_pp_conv')
.conv(3, 3, 256, 1, 1, biased=True, relu=True, name='edge_fea'))
(self.feed('edge_fc')
.conv(1, 1, 128, 1, 1, biased=True, relu=True, name='edge_remap'))
(self.feed('parsing_fea', 'parsing_remap', 'edge_fea', 'edge_remap')
.concat(axis=3, name='parsing_rf_concat')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_rf'))
(self.feed('parsing_rf')
.pyramid_pooling(512, 60, name='parsing_rf_pp1'))
(self.feed('parsing_rf')
.pyramid_pooling(512, 30, name='parsing_rf_pp2'))
(self.feed('parsing_rf')
.pyramid_pooling(512, 20, name='parsing_rf_pp3'))
(self.feed('parsing_rf')
.pyramid_pooling(512, 10, name='parsing_rf_pp4'))
(self.feed('parsing_rf', 'parsing_rf_pp1', 'parsing_rf_pp2', 'parsing_rf_pp3', 'parsing_rf_pp4')
.concat(axis=3, name='parsing_rf_out')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='parsing_rf_conv')
.dropout(keep_prob, name='parsing_rf_dropout')
.conv(3, 3, n_classes, 1, 1, biased=True, relu=False, name='parsing_rf_fc'))
(self.feed('edge_fea', 'edge_remap', 'parsing_fea', 'parsing_remap')
.concat(axis=3, name='edge_rf_concat')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='edge_rf'))
(self.feed('edge_rf')
.pyramid_pooling(512, 60, name='edge_rf_pp1'))
(self.feed('edge_rf')
.pyramid_pooling(512, 30, name='edge_rf_pp2'))
(self.feed('edge_rf')
.pyramid_pooling(512, 20, name='edge_rf_pp3'))
(self.feed('edge_rf')
.pyramid_pooling(512, 10, name='edge_rf_pp4'))
(self.feed('edge_rf', 'edge_rf_pp1', 'edge_rf_pp2', 'edge_rf_pp3', 'edge_rf_pp4')
.concat(axis=3, name='edge_rf_out')
.conv(3, 3, 512, 1, 1, biased=True, relu=True, name='edge_rf_conv')
.dropout(keep_prob, name='edge_rf_dropout')
.conv(3, 3, 1, 1, 1, biased=True, relu=False, name='edge_rf_fc'))
def main(input_dir, output_dir, checkpoint_dir):
# Create queue coordinator.
coord = tf.train.Coordinator()
# Load input
input_files = sorted(glob(os.path.join(input_dir, '*')))
input_queue = tf.train.slice_input_producer([tf.convert_to_tensor(input_files, dtype=tf.string)], shuffle=False)
img_contents = tf.io.read_file(input_queue[0])
img = tf.io.decode_jpeg(img_contents, channels=3)
# Resize to prevent OOM
img = tf.image.resize(img, [INPUT_RESIZE, INPUT_RESIZE], preserve_aspect_ratio=True)
img_r, img_g, img_b = tf.split(value=img, num_or_size_splits=3, axis=2)
image = tf.cast(tf.concat([img_b, img_g, img_r], 2), dtype=tf.float32)
# TODO: Subtract by mean (see image_reader)
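    # Test-time augmentation: also run a horizontally flipped copy of the image
    # and average the two predictions further below.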
image_rev = tf.reverse(image, tf.stack([1]))
image_batch = tf.stack([image, image_rev])
# Create network
with tf.variable_scope('', reuse=False):
net = PGNModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)
# parsing net
parsing_out1 = net.layers['parsing_fc']
parsing_out2 = net.layers['parsing_rf_fc']
# edge net
edge_out2 = net.layers['edge_rf_fc']
# combine resize
    parsing_out1 = tf.image.resize_images(parsing_out1, tf.shape(image_batch)[1:3])
    parsing_out2 = tf.image.resize_images(parsing_out2, tf.shape(image_batch)[1:3])
    edge_out2 = tf.image.resize_images(edge_out2, tf.shape(image_batch)[1:3])
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2]), axis=0)
head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
tail_list = tf.unstack(tail_output, num=20, axis=2)
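    # Undo the horizontal flip for the second prediction: channels 14-19 are
    # swapped pairwise, presumably the left/right paired part labels.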
tail_list_rev = [None] * 20
for xx in range(14):
tail_list_rev[xx] = tail_list[xx]
tail_list_rev[14] = tail_list[15]
tail_list_rev[15] = tail_list[14]
tail_list_rev[16] = tail_list[17]
tail_list_rev[17] = tail_list[16]
tail_list_rev[18] = tail_list[19]
tail_list_rev[19] = tail_list[18]
tail_output_rev = tf.stack(tail_list_rev, axis=2)
tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))
raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
    raw_output_all = tf.expand_dims(raw_output_all, axis=0)
    raw_output_all = tf.argmax(raw_output_all, axis=3)
    pred_all = tf.expand_dims(raw_output_all, axis=3)  # Create 4-d tensor.
raw_edge = tf.reduce_mean(tf.stack([edge_out2]), axis=0)
head_output, tail_output = tf.unstack(raw_edge, num=2, axis=0)
tail_output_rev = tf.reverse(tail_output, tf.stack([1]))
raw_edge_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
    raw_edge_all = tf.expand_dims(raw_edge_all, axis=0)
# Which variables to load.
restore_var = tf.global_variables()
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.local_variables_initializer())
# Load weights.
loader = tf.train.Saver(var_list=restore_var)
if not load(loader, sess, checkpoint_dir):
raise IOError('Checkpoint loading failed')
# Start queue threads.
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Iterate over training steps and output
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for input_file in input_files:
parsing_ = sess.run(pred_all)
img_id = os.path.splitext(os.path.basename(input_file))[0]
msk = decode_labels(parsing_, num_classes=N_CLASSES)
parsing_im = Image.fromarray(msk[0])
parsing_im.save('{}/{}.png'.format(output_dir, img_id))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main('data/body_samples', 'output', 'assets')
| 51.992951
| 152
| 0.609735
|
80d3f723b648a84a21f8d661e45c1c2e0c528bdd
| 6,964
|
py
|
Python
|
test/functional/rpc_getblockstats.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 61
|
2016-03-09T10:42:05.000Z
|
2018-03-13T05:06:30.000Z
|
test/functional/rpc_getblockstats.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 153
|
2016-02-29T17:45:10.000Z
|
2018-03-16T23:37:02.000Z
|
test/functional/rpc_getblockstats.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 18
|
2016-03-02T21:50:44.000Z
|
2018-03-07T20:36:12.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(SyscoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
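    # Running this test with --gen-test-data regenerates the JSON fixture that
    # --test-data points to (data/rpc_getblockstats.json by default).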
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.generate(self.nodes[0], 1)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
            ['minfee', inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| 41.700599
| 121
| 0.627944
|
9c9ff6929b6ec873f836d992d30150a81e27a6a6
| 2,280
|
py
|
Python
|
generated-libraries/python/netapp/ntdtest/group4_stats_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/ntdtest/group4_stats_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/ntdtest/group4_stats_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class Group4StatsInfo(NetAppObject):
"""
4th nested typedef at level 1
"""
_field_16 = None
@property
def field_16(self):
"""
Dummy/Generic Field 16 accepts any string
Attributes: non-creatable, non-modifiable
"""
return self._field_16
@field_16.setter
def field_16(self, val):
        if val is not None:
self.validate('field_16', val)
self._field_16 = val
_field_14 = None
@property
def field_14(self):
"""
Dummy/Generic Field 14 accepts any string
Attributes: non-creatable, non-modifiable
"""
return self._field_14
@field_14.setter
def field_14(self, val):
        if val is not None:
self.validate('field_14', val)
self._field_14 = val
_field_15 = None
@property
def field_15(self):
"""
Dummy/Generic Field 15 accepts any string
Attributes: non-creatable, non-modifiable
"""
return self._field_15
@field_15.setter
def field_15(self, val):
        if val is not None:
self.validate('field_15', val)
self._field_15 = val
_field_13 = None
@property
def field_13(self):
"""
Dummy/Generic Field 13 accepts any string
Attributes: non-creatable, non-modifiable
"""
return self._field_13
@field_13.setter
def field_13(self, val):
        if val is not None:
self.validate('field_13', val)
self._field_13 = val
@staticmethod
def get_api_name():
return "group4-stats-info"
@staticmethod
def get_desired_attrs():
return [
'field-16',
'field-14',
'field-15',
'field-13',
]
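    # NOTE: 'basestring' in describe_properties below is the Python 2 builtin;
    # under Python 3 this generated module would need 'str' instead.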
def describe_properties(self):
return {
'field_16': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'field_14': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'field_15': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'field_13': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| 27.142857
| 90
| 0.555702
|
b87831eecb5eb6278e2387725e2f12fff3a4339c
| 23,777
|
py
|
Python
|
python/friesian/example/dien/utils.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/friesian/example/dien/utils.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/friesian/example/dien/utils.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications copyright (C) 2018 Alibaba Group
# Modifications copyright 2016 The BigDL Authors.
# ========================================================
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import *
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
from tensorflow.python.ops import nn_ops
from bigdl.dllib.utils.log4Error import *
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
class _Linear(object):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of weight variable.
dtype: data type for variables.
build_bias: boolean, whether to build a bias variable.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Raises:
ValueError: if inputs_shape is wrong.
"""
def __init__(self,
args,
output_size,
build_bias,
bias_initializer=None,
kernel_initializer=None):
self._build_bias = build_bias
if args is None or (nest.is_sequence(args) and not args):
invalidInputError(False, "`args` must be specified")
if not nest.is_sequence(args):
args = [args]
self._is_sequence = False
else:
self._is_sequence = True
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
invalidInputError(False, "linear is expecting 2D arguments: %s" % shapes)
if tf.__version__[0] == '1':
if shape[1].value is None:
invalidInputError(False,
"linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
elif tf.__version__[0] == '2':
if shape[1] is None:
invalidInputError(False,
"linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
self._weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if build_bias:
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
self._biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
def __call__(self, args):
if not self._is_sequence:
args = [args]
if len(args) == 1:
res = math_ops.matmul(args[0], self._weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), self._weights)
if self._build_bias:
res = nn_ops.bias_add(res, self._biases)
return res
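# Rough usage sketch of _Linear, as the attention GRU cells below use it
# (illustrative only):
#   gate_linear = _Linear([inputs, state], 2 * num_units, build_bias=True)
#   gate_value = math_ops.sigmoid(gate_linear([inputs, state]))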
class QAAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(QAAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
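        # AGRU-style update: the attention score replaces the update gate and
        # interpolates directly between the previous state and the candidate.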
new_h = (1. - att_score) * state + att_score * c
return new_h, new_h
class VecAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
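        # AUGRU-style update: the update gate is rescaled by the attention score
        # before blending the previous state with the candidate.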
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.compat.v1.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.compat.v1.get_variable("prelu_" + scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(tf.cast(0.0, _x.dtype), _x) + _alpha * tf.minimum(tf.cast(0.0, _x.dtype),
_x)
def calc_auc(raw_arr):
    """Compute the ROC AUC of a set of predictions.
    Args:
        raw_arr: iterable of [score, label] pairs, where label is 1. for a
            positive example and 0. for a negative one.
    Returns:
        float: area under the ROC curve, accumulated with the trapezoid rule.
    """
arr = sorted(raw_arr, key=lambda d: d[0], reverse=True)
pos, neg = 0., 0.
for record in arr:
if record[1] == 1.:
pos += 1
else:
neg += 1
fp, tp = 0., 0.
xy_arr = []
for record in arr:
if record[1] == 1.:
tp += 1
else:
fp += 1
xy_arr.append([fp / neg, tp / pos])
auc = 0.
prev_x = 0.
prev_y = 0.
for x, y in xy_arr:
if x != prev_x:
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1,
time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
    key_masks = mask  # [B, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
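    # Fill masked positions with a very large negative value so that softmax
    # assigns them a weight of (almost) zero.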
    v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings)  # [B, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
# output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1,
time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
        print("query size mismatch")
query = tf.concat(values=[
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
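    # DIN attention features: the query, the keys, and their element-wise
    # difference and product are concatenated before the scoring MLP.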
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.compat.v1.layers.dense(din_all, 80, activation=tf.nn.sigmoid,
name='f1_att' + stag)
d_layer_2_all = tf.compat.v1.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid,
name='f2_att' + stag)
d_layer_3_all = tf.compat.v1.layers.dense(d_layer_2_all, 1, activation=None,
name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
return output
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1,
time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.compat.v1.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.compat.v1.layers.dense(din_all, 80, activation=tf.nn.sigmoid,
name='f1_att' + stag)
d_layer_2_all = tf.compat.v1.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid,
name='f2_att' + stag)
d_layer_3_all = tf.compat.v1.layers.dense(d_layer_2_all, 1, activation=None,
name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i + 1, :],
ATTENTION_SIZE, mask[:, 0:i + 1], softmax_stag=1,
stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1,
time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.compat.v1.layers.dense(query, facts_size, activation=None,
name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
d_layer_1_all = tf.compat.v1.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid,
name='f1_shine_att' + stag)
d_layer_2_all = tf.compat.v1.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid,
name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
def dice(_x, axis=-1, epsilon=0.000000001, name='', data_type=tf.float32):
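    """Dice activation (data-adaptive variant of PReLU used in DIN/DIEN).
    The input is normalized with batch statistics, gated through a sigmoid to
    obtain a probability x_p, and the output blends alphas * _x with _x
    according to x_p.
    """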
    with tf.compat.v1.variable_scope(name, reuse=tf.compat.v1.AUTO_REUSE):
        alphas = tf.compat.v1.get_variable('alpha' + name, _x.get_shape()[-1],
                                           initializer=tf.constant_initializer(0.0),
                                           dtype=data_type)
input_shape = list(_x.get_shape())
reduction_axes = list(range(len(input_shape)))
del reduction_axes[axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[axis] = input_shape[axis]
# case: train mode (uses stats of the current batch)
mean = tf.reduce_mean(_x, axis=reduction_axes)
        broadcast_mean = tf.reshape(mean, broadcast_shape)
        std = tf.reduce_mean(tf.square(_x - broadcast_mean) + epsilon, axis=reduction_axes)
        std = tf.sqrt(std)
        broadcast_std = tf.reshape(std, broadcast_shape)
        x_normed = (_x - broadcast_mean) / (broadcast_std + epsilon)
# x_normed = tf.layers.batch_normalization(_x, center=False, scale=False)
x_p = tf.sigmoid(x_normed)
return alphas * (1.0 - x_p) * _x + x_p * _x
def parametric_relu(_x):
    alphas = tf.compat.v1.get_variable('alpha', _x.get_shape()[-1],
                                       initializer=tf.constant_initializer(0.0),
                                       dtype=tf.float32)
pos = tf.nn.relu(_x)
neg = alphas * (_x - abs(_x)) * 0.5
return pos + neg
| 40.505963
| 100
| 0.593304
|
6eb29e19e1e98b8a82bfd8dde2537fe123954f96
| 2,764
|
py
|
Python
|
tests/test_rfc4059.py
|
inexio/pyasn1-modules
|
13b84f74541ec442037273ddf8ba62bbba2cd974
|
[
"BSD-2-Clause"
] | 2
|
2020-12-29T07:13:05.000Z
|
2021-02-07T15:32:26.000Z
|
tests/test_rfc4059.py
|
inexio/pyasn1-modules
|
13b84f74541ec442037273ddf8ba62bbba2cd974
|
[
"BSD-2-Clause"
] | 3
|
2020-12-22T23:21:43.000Z
|
2021-04-06T16:24:39.000Z
|
tests/test_rfc4059.py
|
inexio/pyasn1-modules
|
13b84f74541ec442037273ddf8ba62bbba2cd974
|
[
"BSD-2-Clause"
] | 1
|
2021-01-17T17:45:03.000Z
|
2021-01-17T17:45:03.000Z
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2021, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc4059
class WarrantyCertificateTestCase(unittest.TestCase):
pem_text = """\
MIIC7DCCAnKgAwIBAgIJAKWzVCgbsG5OMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMjEwMTMwMTc0ODMyWhcNMjIwMTMwMTc0ODMyWjBuMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCQ0ExETAPBgNVBAcTCFNhbiBKb3NlMSAwHgYDVQQKExdC
b2d1cyBDb21tZXJjZSBTZXJ2aWNlczEdMBsGA1UEAxMUY29tbWVyY2UuZXhhbXBs
ZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASkrm8xpNVoCIOngV5bYdgp+o65
QBsYW4enstEkzfDz4ol4/NiF0IeFAKc3dZlTzk5DK3QldD46TEa8BUU5FiDVoZWj
9SnUbAP5qpHpbdH5m0wGmdZ3WY4Pwm5KTl8XX3CjggEJMIIBBTALBgNVHQ8EBAMC
B4AwQgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRy
dXN0ZWQgZm9yIGFueSBwdXJwb3NlLjAJBgNVHRMEAjAAMB0GA1UdDgQWBBS/dszg
PxHYt3vq+ckyOek0e4OpcDAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs4hUI
wTBnBggrBgEFBQcBEARbMFkwEwUAMAwCAgNIAgNKC0YCAQICAQAWQmh0dHBzOi8v
aW1nLmh1ZmZpbmd0b25wb3N0LmNvbS9hc3NldC81NWE2NzAyZDEyMDAwMDJiMDAx
MzRhZGQuanBlZzAKBggqhkjOPQQDAwNoADBlAjEAjweTyuXOCzWYRNwBXk+tM8/r
X/kfGlB5igFOcTuTrQJwJgQpdt5oGVXzwBgrAckDAjBbQJzl+k9IhBFYvBwmlmTj
SNZvBmsBe5D+PlZZF/XpJ21bf6HPAGkBMMDNPdTdKXk=
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc4059.id_pe_warranty_extn:
self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
ev, rest = der_decoder(extn['extnValue'],
asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
self.assertFalse(rest)
self.assertTrue(ev.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(ev))
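                # 840 is the ISO 4217 numeric code for US dollars.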
self.assertEqual(840, ev['wData']['base']['amount']['currency'])
found = True
self.assertTrue(found)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| 39.485714
| 80
| 0.787627
|
da1901e5247726ada17f4669d26678c5d36217f0
| 8,587
|
py
|
Python
|
backbones/iresnet.py
|
fdbtrs/mixfacenets
|
ca3ef306b7e05cfab94f48e753d5aac248281032
|
[
"MIT"
] | 48
|
2021-07-26T13:09:53.000Z
|
2022-02-23T11:41:39.000Z
|
backbones/iresnet.py
|
Gaurav14cs17/mixfacenets
|
ea7fe3f65a2a00dfc529ef21bfc7d279fdaf58b8
|
[
"MIT"
] | 5
|
2021-08-02T10:19:15.000Z
|
2022-01-14T13:46:17.000Z
|
backbones/iresnet.py
|
Gaurav14cs17/mixfacenets
|
ea7fe3f65a2a00dfc529ef21bfc7d279fdaf58b8
|
[
"MIT"
] | 10
|
2021-07-29T09:45:07.000Z
|
2021-12-07T00:39:48.000Z
|
import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100']
from backbones.mixnetm import _calc_width, count_model_flops
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return input * x
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1,use_se=False):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
self.use_se=use_se
if (use_se):
self.se_block=SEModule(planes,16)
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if(self.use_se):
out=self.se_block(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
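    # 7 x 7 is the spatial size of the final feature map for 112 x 112 inputs
    # (four stride-2 stages reduce the resolution by a factor of 16).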
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
super(IResNet, self).__init__()
self.fp16 = fp16
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2])
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout = nn.Dropout(p=dropout, inplace=True)
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation))
return nn.Sequential(*layers)
def forward(self, x):
with torch.cuda.amp.autocast(self.fp16):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x.float() if self.fp16 else x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def _test():
import torch
pretrained = False
models = [
iresnet100
]
for model in models:
net = model()
print(net)
# net.train()
weight_count = _calc_width(net)
flops=count_model_flops(net)
print("m={}, {}".format(model.__name__, weight_count))
print("m={}, {}".format(model.__name__, flops))
#assert (model != mixnet_s or weight_count == 4134606)
#assert (model != mixnet_m or weight_count == 5014382)
#assert (model != mixnet_l or weight_count == 7329252)
net.eval()
x = torch.randn(1, 3, 112, 112)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 512))
if __name__ == "__main__":
_test()
| 35.92887
| 99
| 0.549668
|
583e5473642c271dd51ab99d0a8fe0423388e86b
| 3,516
|
py
|
Python
|
onnxmltools/convert/sklearn/shape_calculators/LinearClassifier.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
onnxmltools/convert/sklearn/shape_calculators/LinearClassifier.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
onnxmltools/convert/sklearn/shape_calculators/LinearClassifier.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numpy as np
import six, numbers
from ...common._registration import register_shape_calculator
from ...common.data_types import Int64TensorType, FloatTensorType, StringTensorType, DictionaryType
from ...common.utils import check_input_and_output_numbers, check_input_and_output_types
def calculate_sklearn_linear_classifier_output_shapes(operator):
'''
This operator maps an input feature vector into a scalar label if the number of outputs is one. If two outputs
appear in this operator's output list, we should further generate a map storing all classes' probabilities.
Allowed input/output patterns are
1. [1, C] ---> [1, 1], Map
    2. [N, C] ---> [N, 1], A sequence of maps
    Note that the second case is not allowed as long as ZipMap only produces a dictionary.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=[1, 2])
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType])
if len(operator.inputs[0].type.shape) != 2:
raise RuntimeError('Input must be a [N, C]-tensor')
N = operator.inputs[0].type.shape[0]
if N != 1:
raise ValueError('Currently we only support one example per batch')
class_labels = operator.raw_operator.classes_
if all(isinstance(i, np.ndarray) for i in class_labels):
class_labels = np.concatenate(class_labels)
if all(isinstance(i, (six.string_types, six.text_type)) for i in class_labels):
operator.outputs[0].type = StringTensorType(shape=[1, 1])
if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':
# For multi-class classifier, we produce a map for encoding the probabilities of all classes
operator.outputs[1].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]))
else:
# For binary classifier, we produce the probability of the positive class
operator.outputs[1].type = FloatTensorType(shape=[1, 1])
elif all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in class_labels):
operator.outputs[0].type = Int64TensorType(shape=[1, 1])
if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':
# For multi-class classifier, we produce a map for encoding the probabilities of all classes
operator.outputs[1].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]))
else:
# For binary classifier, we produce the probability of the positive class
operator.outputs[1].type = FloatTensorType(shape=[1, 1])
else:
raise ValueError('Unsupported or mixed label types')
register_shape_calculator('SklearnLinearClassifier', calculate_sklearn_linear_classifier_output_shapes)
register_shape_calculator('SklearnLinearSVC', calculate_sklearn_linear_classifier_output_shapes)
register_shape_calculator('SklearnDecisionTreeClassifier', calculate_sklearn_linear_classifier_output_shapes)
register_shape_calculator('SklearnRandomForestClassifier', calculate_sklearn_linear_classifier_output_shapes)
register_shape_calculator('SklearnGradientBoostingClassifier', calculate_sklearn_linear_classifier_output_shapes)
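# Illustrative sketch (added; not part of the original module). For a string-labelled
# multi-class model, the calculator above assigns output types equivalent to the
# following, mirroring the "[1, C] ---> [1, 1] + map" pattern described in the docstring:
#
#   label_output_type = StringTensorType(shape=[1, 1])
#   score_output_type = DictionaryType(StringTensorType([1]), FloatTensorType([1]))
#
# For a binary SklearnLinearSVC the second output would instead be FloatTensorType(shape=[1, 1]).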
| 55.809524
| 114
| 0.713026
|
d96fd991e3e16c561267b3912950392f1e3067ec
| 307
|
py
|
Python
|
pyecharts/charts/three_axis_charts/line3D.py
|
CharileWithZoe/pyecharts
|
dbded9a8932cc13840b7d130802176fd88a97bf8
|
[
"MIT"
] | 11,032
|
2017-12-21T01:21:38.000Z
|
2022-03-31T23:02:38.000Z
|
pyecharts/charts/three_axis_charts/line3D.py
|
reed-hong/pyecharts
|
5f01f2fb43d1602a46d77234721450008cbff7eb
|
[
"MIT"
] | 1,687
|
2017-12-21T02:10:47.000Z
|
2022-03-31T14:31:45.000Z
|
pyecharts/charts/three_axis_charts/line3D.py
|
reed-hong/pyecharts
|
5f01f2fb43d1602a46d77234721450008cbff7eb
|
[
"MIT"
] | 2,528
|
2017-12-21T07:57:52.000Z
|
2022-03-30T15:34:51.000Z
|
from ... import types
from ...charts.chart import ThreeAxisChart
from ...options import InitOpts
class Line3D(ThreeAxisChart):
"""
<<< 3D Line-Chart >>>
"""
def __init__(self, init_opts: types.Init = InitOpts()):
super().__init__(init_opts)
self._3d_chart_type = "line3D"
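# Hedged usage sketch (added; the .add() call signature is an assumption about the
# ThreeAxisChart base class, and the sample data is made up):
#
#   chart = Line3D(init_opts=InitOpts(width="900px", height="500px"))
#   chart.add("demo", [[x, x * 2, x * 3] for x in range(10)])
#   chart.render("line3d_demo.html")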
| 21.928571
| 59
| 0.644951
|
47437ecb2d68c8018ad981a71be07b9da3bfb4be
| 9,346
|
py
|
Python
|
etcviz/etc.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | 1
|
2021-11-17T15:00:14.000Z
|
2021-11-17T15:00:14.000Z
|
etcviz/etc.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | null | null | null |
etcviz/etc.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | null | null | null |
"""
ETC object.
"""
import os
import math
import random
import shutil
import importlib
from copy import deepcopy
import pygame
from .mode import Mode
from .tools import read_csv, write_csv
class ETC:
def __init__(self, mode_dir, scenes=None, resolution=(1280, 720)):
"""
Initialize a ETC object.
"""
self.knob1 = 0.5
self.knob2 = 0.5
self.knob3 = 0.5
self.knob4 = 0.5
self.knob5 = 0.5
self.knob_step = 0.01
self.audio_in = [random.randint(-32768, 32767) for i in range(100)]
self.bg_color = (0, 0, 0)
self.audio_trig = False
self.audio_level = 0.5
self.midi_note_new = False
self.resolution = resolution
self.screen = pygame.display.set_mode(self.resolution)
self.help, self.levels = True, True
self.wd = "etctmp"
self.saved_scenes = []
if scenes is not None:
self.read_scenes(scenes, mode_dir)
else:
self.read_modes(mode_dir)
self.init_workdir()
self.mode_index = 0
self.setup_mode()
def init_workdir(self):
"""
Initialize a temporary work directory for ETC to use importlib.
"""
# Create workdir if not exists (delete if exists)
if os.path.exists(self.wd):
shutil.rmtree(self.wd)
os.mkdir(self.wd)
# Copy all mode files
for mode in self.modes:
mode.root = os.path.join(self.wd, mode.name)
mode.libname = f"{self.wd}.{mode.name}.main"
if not os.path.exists(mode.root):
shutil.copytree(mode.dir, mode.root)
def read_modes(self, mode):
"""
Read mode(s) from a given directory.
Initially checks if there is a 'main.py' file in the given directory.
If there is only loads that mode, if not checks each directory
in given directory and collects the ones with 'main.py' files.
"""
self.modes = []
modes_list = os.listdir(mode)
if "main.py" in modes_list:
self.modes = [Mode(mode)]
if self.modes == []:
for m in modes_list:
mdir = os.path.join(mode, m)
if os.path.isdir(mdir):
if "main.py" in os.listdir(mdir):
self.modes.append(Mode(mdir))
def setup_mode(self):
"""
Load mode and setup display.
"""
self.mode = importlib.import_module(self.modes[self.mode_index].libname)
self.mode_root = self.modes[self.mode_index].root
self.mode.setup(self.screen, self)
self.update_knobs(self.modes[self.mode_index].knobs)
print(f"Load mode {self.mode_index + 1} / {len(self.modes)} : {self.modes[self.mode_index].name}")
print(f"Knobs: {self.modes[self.mode_index].knobs}")
def load_next_mode(self):
"""
Load next mode in the list.
"""
self.mode_index += 1
if self.mode_index >= len(self.modes):
self.mode_index = 0
self.setup_mode()
def load_previous_mode(self):
"""
Load previous mode in the list.
"""
self.mode_index -= 1
        if self.mode_index < 0:  # wrap to the last mode only after passing the first
self.mode_index = len(self.modes) - 1
self.setup_mode()
def save_mode(self):
"""
Save current mode with knob settings.
"""
mode = deepcopy(self.modes[self.mode_index])
mode.knobs = {i: getattr(self, f"knob{i}") for i in range(1, 6)}
self.saved_scenes.append(mode)
print(f"Saved mode {mode.name} | {len(self.saved_scenes)}")
def update_knobs(self, knobs):
"""
Update knobs from a dictionary.
"""
for knob_id in range(1, 6):
setattr(self, f"knob{knob_id}", knobs[knob_id])
def read_scenes(self, filename, mode_dir):
"""
        Read modes from an ETC Scenes.csv file
"""
scenes = read_csv(filename)
print(f'Reading scenes file: {filename}')
self.modes = []
for idx, scene in enumerate(scenes):
knobs = {i: float(scene[i]) for i in range(1, 6)}
mode = Mode(os.path.join(mode_dir, scene[0]), knobs=knobs)
self.modes.append(mode)
def write_scenes(self, filename):
"""
Write saved scenes to Scenes.csv file
"""
rows = []
for m in self.saved_scenes:
rows.append([m.name] + [m.knobs[i] for i in range(1, 6)] + [True])
write_csv(filename, rows)
print(f"Saved scenes to {filename}")
def display_levels(self):
"""
Display knob values and audio level
"""
if self.levels:
xpos, ypos = 10, 300
text_color = (200, 200, 200)
font_name = pygame.font.match_font('couriernew')
title_font = pygame.font.Font(font_name, 24)
title_text = title_font.render("Levels (l)", True, text_color)
self.screen.blit(title_text, (xpos, ypos))
font = pygame.font.Font(font_name, 20)
levels = ["audio_level", "knob1", "knob2", "knob3", "knob4", "knob5"]
for idx, l in enumerate(levels, start=1):
text = font.render(f"{idx - 1} {l}: {round(getattr(self, l), 2)}", True, text_color)
self.screen.blit(text, (xpos, ypos + 5 + idx * 20))
def display_help(self):
"""
Display help message for usage instructions
"""
text_color = (200, 200, 200)
xpos, ypos = 10, 10
font_name = pygame.font.match_font('couriernew')
instructions = ["space: toggle audio",
"left/right arrow: switch mode",
"1 - 5 + up/down arrow: change knob 1 - 5",
"s: save screenshot",
"r: record gif (press + hold)",
"a: save scene",
"w: write saved scenes",
"h: display usage info",
"l: display level info"]
if self.help:
title_font = pygame.font.Font(font_name, 24)
title_text = title_font.render(self.modes[self.mode_index].name, True, text_color)
self.screen.blit(title_text, (xpos, ypos))
ypos += 25
title_text = title_font.render("Help (h)", True, text_color)
self.screen.blit(title_text, (xpos, ypos))
font = pygame.font.Font(font_name, 20)
for idx, i in enumerate(instructions, start=1):
text = font.render(i, True, text_color)
self.screen.blit(text, (xpos, ypos + 5 + idx * 20))
def toggle(self, attr):
"""
Toggle attribute
"""
if getattr(self, attr):
setattr(self, attr, False)
else:
setattr(self, attr, True)
def audio_stream(self):
"""
Emulate audio input
"""
audio_level = (int(-32768 * self.audio_level), int(32767 * self.audio_level))
if self.audio_trig:
self.audio_in = [random.randint(audio_level[0], audio_level[1]) for i in range(100)]
def color_picker(self):
"""
Original color_picker function from ETC. See link below:
https://github.com/critterandguitari/ETC_Mother/blob/master/etc_system.py
"""
# convert knob to 0-1
c = float(self.knob4)
# all the way down random bw
rando = random.randrange(0, 2)
color = (rando * 255, rando * 255, rando * 255)
# random greys
if c > .02 :
rando = random.randrange(0,255)
color = (rando, rando, rando)
# grey 1
if c > .04 :
color = (50, 50, 50)
# grey 2
if c > .06 :
color = (100, 100 ,100)
# grey 3
if c > .08 :
color = (150, 150 ,150)
# grey 4
if c > .10 :
color = (150, 150 ,150)
# grey 5
if c > .12 :
color = (200, 200 ,200)
# white
if c > .14 :
color = (250, 250 ,250)
#colors
if c > .16 :
r = math.sin(c * 2 * math.pi) * .5 + .5
g = math.sin(c * 4 * math.pi) * .5 + .5
b = math.sin(c * 8 * math.pi) * .5 + .5
color = (r * 255,g * 255,b * 255)
        # full randoms
if c > .96 :
color = (random.randrange(0,255), random.randrange(0,255), random.randrange(0,255))
# primary randoms
if c > .98 :
r = random.randrange(0, 2) * 255
g = random.randrange(0, 2) * 255
b = random.randrange(0, 2) * 255
color = (r,g,b)
color2 = (color[0], color[1], color[2])
return color2
def color_picker_bg(self):
"""
Original color_picker_bg function from ETC. See link below:
https://github.com/critterandguitari/ETC_Mother/blob/master/etc_system.py
"""
c = self.knob5
r = (1 - (math.cos(c * 3 * math.pi) * .5 + .5)) * c
g = (1 - (math.cos(c * 7 * math.pi) * .5 + .5)) * c
b = (1 - (math.cos(c * 11 * math.pi) * .5 + .5)) * c
color = (r * 255,g * 255,b * 255)
self.bg_color = color
return color
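# Hedged usage sketch (added; "modes/" and "Scenes.csv" are hypothetical paths).
# The constructor opens a pygame window, so this is illustrative rather than a test:
#
#   etc = ETC("modes/", resolution=(1280, 720))   # loads every sub-directory containing a main.py
#   etc.load_next_mode()                          # cycle to the next visual mode
#   etc.save_mode()                               # remember the current mode and knob values
#   etc.write_scenes("Scenes.csv")                # persist the saved scenes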
| 33.862319
| 106
| 0.527391
|
abdf54eb125199426eab4c0378e22309b08c20b3
| 1,697
|
py
|
Python
|
src/docserver/api/auth.py
|
djpugh/docserver
|
8b272ae8fe75b4773da6111a074a141bcf4698c9
|
[
"MIT"
] | 1
|
2020-12-29T09:16:51.000Z
|
2020-12-29T09:16:51.000Z
|
src/docserver/api/auth.py
|
djpugh/docserver
|
8b272ae8fe75b4773da6111a074a141bcf4698c9
|
[
"MIT"
] | 36
|
2020-11-14T18:47:37.000Z
|
2022-03-26T15:37:25.000Z
|
src/docserver/api/auth.py
|
djpugh/docserver
|
8b272ae8fe75b4773da6111a074a141bcf4698c9
|
[
"MIT"
] | null | null | null |
import logging
from fastapi import APIRouter, Depends, HTTPException
from docserver.api import schemas
from docserver.auth.authenticator import AuthenticationState, authenticator
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get('/token/upload', response_model=schemas.TokenResponse)
async def get_upload_token(state: AuthenticationState = Depends(authenticator.auth_backend.requires_auth(allow_session=True))):
"""Get an application level token for the API"""
creds = state.user.permissions
permissions = [u for u in creds if u.endswith('/write')]
# Add any write credentials for any admin credentials
permissions += [u.replace('/admin', '/write') for u in creds if u.endswith('/admin')]
if not permissions:
permissions = None
logger.info(f'Credentials {creds}')
logger.info(f'API Permissions {permissions}')
if permissions is None:
raise PermissionError('Not authorised to create an API token')
result = authenticator.get_api_token(scopes=permissions)
result['token_type'] = 'Bearer'
logger.info(result)
return result
@router.get('/token/validate')
async def validate_token(state: AuthenticationState = Depends(authenticator.auth_backend.requires_auth(allow_session=False))):
"""Validate an auth token"""
if state.is_authenticated():
return {'detail': 'valid token'}
raise HTTPException(status_code=403, detail='Invalid token')
@router.get('/me', response_model=schemas.UserResponse)
async def get_me(state: AuthenticationState = Depends(authenticator.auth_backend.requires_auth(allow_session=True))):
logger.info(state)
return schemas.UserResponse.from_orm(state.user)
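# Hedged client-side sketch (added; the base URL, router prefix and token value are
# hypothetical, only the route paths come from the router above):
#
#   import requests
#   headers = {'Authorization': 'Bearer <session-or-api-token>'}
#   requests.get('https://docserver.example.com/token/upload', headers=headers)
#   requests.get('https://docserver.example.com/token/validate', headers=headers)
#   requests.get('https://docserver.example.com/me', headers=headers)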
| 39.465116
| 127
| 0.750147
|
730c052353b0ff24b200472679fad20a5c850fa3
| 3,765
|
py
|
Python
|
borg/test/test_models.py
|
borg-project/borg
|
5140cff6c96de365b2eba9f07b7fc606b4b16c1b
|
[
"MIT"
] | 7
|
2015-03-13T06:40:19.000Z
|
2018-02-23T10:35:46.000Z
|
borg/test/test_models.py
|
borg-project/borg
|
5140cff6c96de365b2eba9f07b7fc606b4b16c1b
|
[
"MIT"
] | null | null | null |
borg/test/test_models.py
|
borg-project/borg
|
5140cff6c96de365b2eba9f07b7fc606b4b16c1b
|
[
"MIT"
] | 4
|
2015-06-17T15:51:45.000Z
|
2021-10-20T20:28:07.000Z
|
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import numpy
import nose.tools
import borg
def test_sampled_pmfs_log_pmf():
"""Test borg.models.sampled_pmfs_log_pmf()."""
cdfs = \
numpy.log([
[[0.1, 0.9], [0.9, 0.1]],
[[0.1, 0.9], [0.9, 0.1]],
])
counts = \
numpy.array(
[
[[1, 0], [0, 0]],
[[0, 0], [2, 0]],
[[1, 0], [2, 0]],
],
numpy.intc,
)
logs = borg.models.sampled_pmfs_log_pmf(cdfs, counts)
nose.tools.assert_almost_equal(numpy.exp(logs[0, 0]), 0.1)
nose.tools.assert_almost_equal(numpy.exp(logs[0, 1]), 0.9**2)
nose.tools.assert_almost_equal(numpy.exp(logs[0, 2]), 0.1 * 0.9**2)
def test_kernel_model_sample():
"""Test borg.models.KernelModel.sample()."""
successes = numpy.array([[0, 1], [1, 0], [0, 0]], numpy.intc)
failures = numpy.array([[0, 0], [0, 0], [0, 1]], numpy.intc)
durations = \
numpy.array([
[[numpy.nan], [42.0]],
[[24.0], [numpy.nan]],
[[numpy.nan], [numpy.nan]],
])
kernel = borg.models.DeltaKernel()
alpha = 1.0 + 1e-8
model = borg.models.KernelModel(successes, failures, durations, 100.0, alpha, kernel)
samples = model.sample(16, 4)
nose.tools.assert_true(numpy.all(numpy.logaddexp.reduce(samples, axis = -1) < 1e-10))
nose.tools.assert_true(numpy.any(numpy.abs(samples[..., 0] - numpy.log((alpha) / (5 * alpha - 4))) < 1e-10))
nose.tools.assert_true(numpy.any(numpy.abs(samples[..., -1] - numpy.log(1.0 / 5)) < 1e-10))
nose.tools.assert_true(numpy.any(numpy.abs(samples[..., -1] - numpy.log((alpha) / (5 * alpha - 4))) < 1e-10))
nose.tools.assert_true(numpy.any(numpy.abs(samples[..., -1] - numpy.log((alpha - 1) / (5 * alpha - 4))) < 1e-10))
def test_multinomial_model_fit():
"""Test borg.models.MultinomialModel.fit()."""
runs = [
("solver_a", 100.0, 1.0, True),
("solver_a", 100.0, 48.0, True),
("solver_a", 100.0, 100.0, False),
("solver_b", 100.0, 66.0, True),
("solver_b", 100.0, 77.0, True),
("solver_b", 100.0, 100.0, False),
]
training = borg.RunData()
for run in runs:
training.add_run("foo", borg.storage.RunRecord(*run))
training.add_run("bar", borg.storage.RunRecord(*run))
alpha = 1.0 + 1e-8
model = borg.models.MultinomialModel.fit(["solver_a", "solver_b"], training, 4, alpha)
components = numpy.exp(model.log_components)
nose.tools.assert_true(numpy.all(numpy.abs(components[0] - components[1]) == 0.0))
nose.tools.assert_true(numpy.all(numpy.abs(numpy.sum(components, axis = -1) - 1.0) < 1e-10))
nose.tools.assert_true(numpy.all(components[:, 0, 0] == components[:, 0, 1]))
nose.tools.assert_true(numpy.all(components[:, 0, 1] > components[:, 0, 2]))
nose.tools.assert_true(numpy.all(components[:, 1, 0] == components[:, 1, 1]))
nose.tools.assert_true(numpy.all(components[:, 1, 1] < components[:, 1, 2]))
def test_multinomial_model_condition():
model = borg.models.MultinomialModel(10.0, numpy.log([[[0.2, 0.1]], [[0.9, 0.8]]]), numpy.log([0.5, 0.5]))
posterior0 = model.condition([(0, 0)])
posterior1 = model.condition([(0, 1)])
nose.tools.assert_almost_equal(posterior0.log_weights[0], numpy.log(0.2 * 0.5 / (0.2 * 0.5 + 0.9 * 0.5)))
nose.tools.assert_almost_equal(posterior0.log_weights[1], numpy.log(0.9 * 0.5 / (0.2 * 0.5 + 0.9 * 0.5)))
nose.tools.assert_almost_equal(posterior1.log_weights[0], numpy.log(0.1 * 0.5 / (0.1 * 0.5 + 0.8 * 0.5)))
nose.tools.assert_almost_equal(posterior1.log_weights[1], numpy.log(0.8 * 0.5 / (0.1 * 0.5 + 0.8 * 0.5)))
| 41.373626
| 117
| 0.58008
|
417ac477f7c4f3d536b1a4bd96f947e6e4b6f8d5
| 29,514
|
py
|
Python
|
assembler.py
|
muffinjets/LADXR
|
bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5
|
[
"MIT"
] | null | null | null |
assembler.py
|
muffinjets/LADXR
|
bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5
|
[
"MIT"
] | null | null | null |
assembler.py
|
muffinjets/LADXR
|
bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5
|
[
"MIT"
] | null | null | null |
import binascii
import utils
import re
REGS8 = {"A": 7, "B": 0, "C": 1, "D": 2, "E": 3, "H": 4, "L": 5, "[HL]": 6}
REGS16A = {"BC": 0, "DE": 1, "HL": 2, "SP": 3}
REGS16B = {"BC": 0, "DE": 1, "HL": 2, "AF": 3}
FLAGS = {"NZ": 0x00, "Z": 0x08, "NC": 0x10, "C": 0x18}
CONST_MAP = {}
class ExprBase:
def asReg8(self):
return None
def isA(self, kind, value=None):
return False
class Token(ExprBase):
def __init__(self, kind, value, line_nr):
self.kind = kind
self.value = value
self.line_nr = line_nr
def isA(self, kind, value=None):
return self.kind == kind and (value is None or value == self.value)
def __repr__(self):
return "[%s:%s:%d]" % (self.kind, self.value, self.line_nr)
def asReg8(self):
if self.kind == 'ID':
return REGS8.get(self.value, None)
return None
class REF(ExprBase):
def __init__(self, expr):
self.expr = expr
def asReg8(self):
if self.expr.isA('ID', 'HL'):
return REGS8['[HL]']
return None
def __repr__(self):
return "[%s]" % (self.expr)
class OP(ExprBase):
def __init__(self, op, left, right=None):
self.op = op
self.left = left
self.right = right
def __repr__(self):
return "%s %s %s" % (self.left, self.op, self.right)
@staticmethod
def make(op, left, right=None):
if left.isA('NUMBER') and right.isA('NUMBER'):
if op == '+':
left.value += right.value
return left
if op == '-':
left.value -= right.value
return left
if op == '*':
left.value *= right.value
return left
if op == '/':
left.value //= right.value
return left
if left.isA('NUMBER') and right is None:
if op == '+':
return left
if op == '-':
left.value = -left.value
return left
return OP(op, left, right)
class Tokenizer:
TOKEN_REGEX = re.compile('|'.join('(?P<%s>%s)' % pair for pair in [
('NUMBER', r'\d+(\.\d*)?'),
('HEX', r'\$[0-9A-Fa-f]+'),
('ASSIGN', r':='),
('COMMENT', r';[^\n]+'),
('LABEL', r':'),
('DIRECTIVE', r'#[A-Za-z_]+'),
('STRING', '[a-zA-Z]?"[^"]*"'),
('ID', r'\.?[A-Za-z_][A-Za-z0-9_\.]*'),
('OP', r'[+\-*/,\(\)]'),
('REFOPEN', r'\['),
('REFCLOSE', r'\]'),
('NEWLINE', r'\n'),
('SKIP', r'[ \t]+'),
('MISMATCH', r'.'),
]))
def __init__(self, code):
self.__tokens = []
line_num = 1
for mo in self.TOKEN_REGEX.finditer(code):
kind = mo.lastgroup
value = mo.group()
if kind == 'MISMATCH':
print(code.split("\n")[line_num-1])
                raise RuntimeError("Syntax error on line %d: %r" % (line_num, value))
elif kind == 'SKIP':
pass
elif kind == 'COMMENT':
pass
else:
if kind == 'NUMBER':
value = int(value)
elif kind == 'HEX':
value = int(value[1:], 16)
kind = 'NUMBER'
elif kind == 'ID':
value = value.upper()
self.__tokens.append(Token(kind, value, line_num))
if kind == 'NEWLINE':
line_num += 1
self.__tokens.append(Token('NEWLINE', '\n', line_num))
def peek(self):
return self.__tokens[0]
def pop(self):
return self.__tokens.pop(0)
def expect(self, kind, value=None):
pop = self.pop()
if not pop.isA(kind, value):
if value is not None:
raise SyntaxError("%s != %s:%s" % (pop, kind, value))
raise SyntaxError("%s != %s" % (pop, kind))
def __bool__(self):
return bool(self.__tokens)
class Assembler:
SIMPLE_INSTR = {
'NOP': 0x00,
'RLCA': 0x07,
'RRCA': 0x0F,
'STOP': 0x010,
'RLA': 0x17,
'RRA': 0x1F,
'DAA': 0x27,
'CPL': 0x2F,
'SCF': 0x37,
'CCF': 0x3F,
'HALT': 0x76,
'RETI': 0xD9,
'DI': 0xF3,
'EI': 0xFB,
}
LINK_REL8 = 0
LINK_ABS8 = 1
LINK_ABS16 = 2
def __init__(self, base_address=None):
self.__base_address = base_address
if base_address is None:
self.__base_address = -1
self.__result = bytearray()
self.__label = {}
self.__link = {}
self.__scope = None
self.__tok = None
def process(self, code):
conditional_stack = [True]
self.__tok = Tokenizer(code)
try:
while self.__tok:
start = self.__tok.pop()
if start.kind == 'NEWLINE':
pass # Empty newline
elif start.kind == 'DIRECTIVE':
if start.value == '#IF':
t = self.parseExpression()
conditional_stack.append(conditional_stack[-1] and t.value != 0)
self.__tok.expect('NEWLINE')
elif start.value == '#ELSE':
conditional_stack[-1] = not conditional_stack[-1] and conditional_stack[-2]
self.__tok.expect('NEWLINE')
elif start.value == '#ENDIF':
conditional_stack.pop()
assert conditional_stack
self.__tok.expect('NEWLINE')
else:
raise SyntaxError(start)
elif not conditional_stack[-1]:
while not self.__tok.pop().isA('NEWLINE'):
pass
elif start.kind == 'ID':
if start.value == 'DB':
self.instrDB()
self.__tok.expect('NEWLINE')
elif start.value == 'DW':
self.instrDW()
self.__tok.expect('NEWLINE')
elif start.value == 'LD':
self.instrLD()
self.__tok.expect('NEWLINE')
elif start.value == 'LDH':
self.instrLDH()
self.__tok.expect('NEWLINE')
elif start.value == 'LDI':
self.instrLDI()
self.__tok.expect('NEWLINE')
elif start.value == 'LDD':
self.instrLDD()
self.__tok.expect('NEWLINE')
elif start.value == 'INC':
self.instrINC()
self.__tok.expect('NEWLINE')
elif start.value == 'DEC':
self.instrDEC()
self.__tok.expect('NEWLINE')
elif start.value == 'ADD':
self.instrADD()
self.__tok.expect('NEWLINE')
elif start.value == 'ADC':
self.instrALU(0x88)
self.__tok.expect('NEWLINE')
elif start.value == 'SUB':
self.instrALU(0x90)
self.__tok.expect('NEWLINE')
elif start.value == 'SBC':
self.instrALU(0x98)
self.__tok.expect('NEWLINE')
elif start.value == 'AND':
self.instrALU(0xA0)
self.__tok.expect('NEWLINE')
elif start.value == 'XOR':
self.instrALU(0xA8)
self.__tok.expect('NEWLINE')
elif start.value == 'OR':
self.instrALU(0xB0)
self.__tok.expect('NEWLINE')
elif start.value == 'CP':
self.instrALU(0xB8)
self.__tok.expect('NEWLINE')
elif start.value == 'BIT':
self.instrBIT(0x40)
self.__tok.expect('NEWLINE')
elif start.value == 'RES':
self.instrBIT(0x80)
self.__tok.expect('NEWLINE')
elif start.value == 'SET':
self.instrBIT(0xC0)
self.__tok.expect('NEWLINE')
elif start.value == 'RET':
self.instrRET()
self.__tok.expect('NEWLINE')
elif start.value == 'CALL':
self.instrCALL()
self.__tok.expect('NEWLINE')
elif start.value == 'RLC':
self.instrCB(0x00)
self.__tok.expect('NEWLINE')
elif start.value == 'RRC':
self.instrCB(0x08)
self.__tok.expect('NEWLINE')
elif start.value == 'RL':
self.instrCB(0x10)
self.__tok.expect('NEWLINE')
elif start.value == 'RR':
self.instrCB(0x18)
self.__tok.expect('NEWLINE')
elif start.value == 'SLA':
self.instrCB(0x20)
self.__tok.expect('NEWLINE')
elif start.value == 'SRA':
self.instrCB(0x28)
self.__tok.expect('NEWLINE')
elif start.value == 'SWAP':
self.instrCB(0x30)
self.__tok.expect('NEWLINE')
elif start.value == 'SRL':
self.instrCB(0x38)
self.__tok.expect('NEWLINE')
elif start.value == 'RST':
self.instrRST()
self.__tok.expect('NEWLINE')
elif start.value == 'JP':
self.instrJP()
self.__tok.expect('NEWLINE')
elif start.value == 'JR':
self.instrJR()
self.__tok.expect('NEWLINE')
elif start.value == 'PUSH':
self.instrPUSHPOP(0xC5)
self.__tok.expect('NEWLINE')
elif start.value == 'POP':
self.instrPUSHPOP(0xC1)
self.__tok.expect('NEWLINE')
elif start.value in self.SIMPLE_INSTR:
self.__result.append(self.SIMPLE_INSTR[start.value])
self.__tok.expect('NEWLINE')
elif self.__tok.peek().kind == 'LABEL':
self.__tok.pop()
self.addLabel(start.value)
else:
raise SyntaxError(start)
else:
raise SyntaxError(start)
except SyntaxError:
print("Syntax error on line: %s" % code.split("\n")[self.__tok.peek().line_nr-1])
raise
def insert8(self, expr):
if expr.isA('NUMBER'):
value = expr.value
else:
self.__link[len(self.__result)] = (Assembler.LINK_ABS8, expr)
value = 0
assert 0 <= value < 256
self.__result.append(value)
def insertRel8(self, expr):
if expr.isA('NUMBER'):
self.__result.append(expr.value)
else:
self.__link[len(self.__result)] = (Assembler.LINK_REL8, expr)
self.__result.append(0x00)
def insert16(self, expr):
if expr.isA('NUMBER'):
value = expr.value
else:
self.__link[len(self.__result)] = (Assembler.LINK_ABS16, expr)
value = 0
assert 0 <= value <= 0xFFFF
self.__result.append(value & 0xFF)
self.__result.append(value >> 8)
def insertString(self, string):
if string.startswith('"') and string.endswith('"'):
self.__result += string[1:-1].encode("ascii")
elif string.startswith("m\"") and string.endswith("\""):
self.__result += utils.formatText(string[2:-1].replace("|", "\n"))
else:
raise SyntaxError
def instrLD(self):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.asReg8() is not None and right_param.asReg8() is not None:
self.__result.append(0x40 | (left_param.asReg8() << 3) | right_param.asReg8())
elif left_param.isA('ID', 'A') and isinstance(right_param, REF):
if right_param.expr.isA('ID', 'BC'):
self.__result.append(0x0A)
elif right_param.expr.isA('ID', 'DE'):
self.__result.append(0x1A)
elif right_param.expr.isA('ID', 'HL+'): # TODO
self.__result.append(0x2A)
elif right_param.expr.isA('ID', 'HL-'): # TODO
self.__result.append(0x3A)
elif right_param.expr.isA('ID', 'C'):
self.__result.append(0xF2)
else:
self.__result.append(0xFA)
self.insert16(right_param.expr)
elif right_param.isA('ID', 'A') and isinstance(left_param, REF):
if left_param.expr.isA('ID', 'BC'):
self.__result.append(0x02)
elif left_param.expr.isA('ID', 'DE'):
self.__result.append(0x12)
elif left_param.expr.isA('ID', 'HL+'): # TODO
self.__result.append(0x22)
elif left_param.expr.isA('ID', 'HL-'): # TODO
self.__result.append(0x32)
elif left_param.expr.isA('ID', 'C'):
self.__result.append(0xE2)
else:
self.__result.append(0xEA)
self.insert16(left_param.expr)
elif left_param.isA('ID', 'BC'):
self.__result.append(0x01)
self.insert16(right_param)
elif left_param.isA('ID', 'DE'):
self.__result.append(0x11)
self.insert16(right_param)
elif left_param.isA('ID', 'HL'):
self.__result.append(0x21)
self.insert16(right_param)
elif left_param.isA('ID', 'SP'):
if right_param.isA('ID', 'HL'):
self.__result.append(0xF9)
else:
self.__result.append(0x31)
self.insert16(right_param)
elif right_param.isA('ID', 'SP') and isinstance(left_param, REF):
self.__result.append(0x08)
self.insert16(left_param.expr)
elif left_param.asReg8() is not None:
self.__result.append(0x06 | (left_param.asReg8() << 3))
self.insert8(right_param)
else:
raise SyntaxError
def instrLDH(self):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.isA('ID', 'A') and isinstance(right_param, REF):
if right_param.expr.isA('ID', 'C'):
self.__result.append(0xF2)
else:
self.__result.append(0xF0)
self.insert8(right_param.expr)
elif right_param.isA('ID', 'A') and isinstance(left_param, REF):
if left_param.expr.isA('ID', 'C'):
self.__result.append(0xE2)
else:
self.__result.append(0xE0)
self.insert8(left_param.expr)
else:
raise SyntaxError
def instrLDI(self):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.isA('ID', 'A') and isinstance(right_param, REF) and right_param.expr.isA('ID', 'HL'):
self.__result.append(0x2A)
elif right_param.isA('ID', 'A') and isinstance(left_param, REF) and left_param.expr.isA('ID', 'HL'):
self.__result.append(0x22)
else:
raise SyntaxError
def instrLDD(self):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.isA('ID', 'A') and isinstance(right_param, REF) and right_param.expr.isA('ID', 'HL'):
self.__result.append(0x3A)
elif right_param.isA('ID', 'A') and isinstance(left_param, REF) and left_param.expr.isA('ID', 'HL'):
self.__result.append(0x32)
else:
raise SyntaxError
def instrINC(self):
param = self.parseParam()
if param.asReg8() is not None:
self.__result.append(0x04 | (param.asReg8() << 3))
elif param.isA('ID', 'BC'):
self.__result.append(0x03)
elif param.isA('ID', 'DE'):
self.__result.append(0x13)
elif param.isA('ID', 'HL'):
self.__result.append(0x23)
elif param.isA('ID', 'SP'):
self.__result.append(0x33)
else:
raise SyntaxError
def instrDEC(self):
param = self.parseParam()
if param.asReg8() is not None:
self.__result.append(0x05 | (param.asReg8() << 3))
elif param.isA('ID', 'BC'):
self.__result.append(0x0B)
elif param.isA('ID', 'DE'):
self.__result.append(0x1B)
elif param.isA('ID', 'HL'):
self.__result.append(0x2B)
elif param.isA('ID', 'SP'):
self.__result.append(0x3B)
else:
raise SyntaxError
def instrADD(self):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.isA('ID', 'A'):
if right_param.asReg8() is not None:
self.__result.append(0x80 | right_param.asReg8())
else:
self.__result.append(0xC6)
self.insert8(right_param)
elif left_param.isA('ID', 'HL') and right_param.isA('ID') and right_param.value in REGS16A:
self.__result.append(0x09 | REGS16A[right_param.value] << 4)
elif left_param.isA('ID', 'SP'):
self.__result.append(0xE8)
self.insert8(right_param)
else:
raise SyntaxError
def instrALU(self, code_value):
param = self.parseParam()
if param.isA('ID', 'A') and self.__tok.peek().isA('OP', ','):
self.__tok.pop()
param = self.parseParam()
if param.asReg8() is not None:
self.__result.append(code_value | param.asReg8())
else:
self.__result.append(code_value | 0x46)
self.insert8(param)
def instrRST(self):
param = self.parseParam()
if param.isA('NUMBER') and (param.value & ~0x38) == 0:
self.__result.append(0xC7 | param.value)
else:
raise SyntaxError
def instrPUSHPOP(self, code_value):
param = self.parseParam()
if param.isA('ID') and param.value in REGS16B:
self.__result.append(code_value | (REGS16B[param.value] << 4))
else:
raise SyntaxError
def instrJR(self):
param = self.parseParam()
if self.__tok.peek().isA('OP', ','):
self.__tok.pop()
condition = param
param = self.parseParam()
if condition.isA('ID') and condition.value in FLAGS:
self.__result.append(0x20 | FLAGS[condition.value])
else:
raise SyntaxError
else:
self.__result.append(0x18)
self.insertRel8(param)
def instrCB(self, code_value):
param = self.parseParam()
if param.asReg8() is not None:
self.__result.append(0xCB)
self.__result.append(code_value | param.asReg8())
else:
raise SyntaxError
def instrBIT(self, code_value):
left_param = self.parseParam()
self.__tok.expect('OP', ',')
right_param = self.parseParam()
if left_param.isA('NUMBER') and right_param.asReg8() is not None:
self.__result.append(0xCB)
self.__result.append(code_value | (left_param.value << 3) | right_param.asReg8())
else:
raise SyntaxError
def instrRET(self):
if self.__tok.peek().isA('ID'):
condition = self.__tok.pop()
if condition.isA('ID') and condition.value in FLAGS:
self.__result.append(0xC0 | FLAGS[condition.value])
else:
raise SyntaxError
else:
self.__result.append(0xC9)
def instrCALL(self):
param = self.parseParam()
if self.__tok.peek().isA('OP', ','):
self.__tok.pop()
condition = param
param = self.parseParam()
if condition.isA('ID') and condition.value in FLAGS:
self.__result.append(0xC4 | FLAGS[condition.value])
else:
raise SyntaxError
else:
self.__result.append(0xCD)
self.insert16(param)
def instrJP(self):
param = self.parseParam()
if self.__tok.peek().isA('OP', ','):
self.__tok.pop()
condition = param
param = self.parseParam()
if condition.isA('ID') and condition.value in FLAGS:
self.__result.append(0xC2 | FLAGS[condition.value])
else:
raise SyntaxError
elif param.isA('ID', 'HL'):
self.__result.append(0xE9)
return
else:
self.__result.append(0xC3)
self.insert16(param)
def instrDW(self):
param = self.parseExpression()
self.insert16(param)
while self.__tok.peek().isA('OP', ','):
self.__tok.pop()
param = self.parseExpression()
self.insert16(param)
def instrDB(self):
param = self.parseExpression()
if param.isA('STRING'):
self.insertString(param.value)
else:
self.insert8(param)
while self.__tok.peek().isA('OP', ','):
self.__tok.pop()
param = self.parseExpression()
if param.isA('STRING'):
self.insertString(param.value)
else:
self.insert8(param)
def addLabel(self, label):
if label.startswith("."):
label = self.__scope + label
else:
assert "." not in label, label
self.__scope = label
assert label not in self.__label, "Duplicate label: %s" % (label)
self.__label[label] = len(self.__result)
def parseParam(self):
t = self.__tok.peek()
if t.kind == 'REFOPEN':
self.__tok.pop()
expr = self.parseExpression()
self.__tok.expect('REFCLOSE')
return REF(expr)
return self.parseExpression()
def parseExpression(self):
t = self.parseAddSub()
return t
def parseAddSub(self):
t = self.parseFactor()
p = self.__tok.peek()
if p.isA('OP', '+') or p.isA('OP', '-'):
self.__tok.pop()
return OP.make(p.value, t, self.parseAddSub())
return t
def parseFactor(self):
t = self.parseUnary()
p = self.__tok.peek()
if p.isA('OP', '*') or p.isA('OP', '/'):
self.__tok.pop()
return OP.make(p.value, t, self.parseFactor())
return t
def parseUnary(self):
t = self.__tok.pop()
if t.isA('OP', '-') or t.isA('OP', '+'):
return OP.make(t.value, self.parseUnary())
elif t.isA('OP', '('):
t = self.parseExpression()
self.__tok.expect('OP', ')')
return t
if t.kind not in ('ID', 'NUMBER', 'STRING'):
raise SyntaxError
if t.isA('ID') and t.value in CONST_MAP:
t.kind = 'NUMBER'
t.value = CONST_MAP[t.value]
elif t.isA('ID') and t.value.startswith("."):
t.value = self.__scope + t.value
return t
def link(self):
for offset, (link_type, expr) in self.__link.items():
expr = self.resolveExpr(expr)
assert expr.isA('NUMBER'), expr
value = expr.value
if link_type == Assembler.LINK_REL8:
byte = (value - self.__base_address) - offset - 1
                assert -128 <= byte <= 127, expr
self.__result[offset] = byte & 0xFF
elif link_type == Assembler.LINK_ABS8:
assert 0 <= value <= 0xFF
self.__result[offset] = value & 0xFF
elif link_type == Assembler.LINK_ABS16:
assert self.__base_address >= 0, "Cannot place absolute values in a relocatable code piece"
assert 0 <= value <= 0xFFFF
self.__result[offset] = value & 0xFF
self.__result[offset + 1] = value >> 8
else:
raise RuntimeError
def resolveExpr(self, expr):
if expr is None:
return None
elif isinstance(expr, OP):
return OP.make(expr.op, self.resolveExpr(expr.left), self.resolveExpr(expr.right))
elif expr.isA('ID') and expr.value in self.__label:
return Token('NUMBER', self.__label[expr.value] + self.__base_address, expr.line_nr)
return expr
def getResult(self):
return self.__result
def getLabels(self):
return self.__label.items()
def const(name, value):
name = name.upper()
assert name not in CONST_MAP
CONST_MAP[name] = value
def resetConsts():
CONST_MAP.clear()
def ASM(code, base_address=None, labels_result=None):
asm = Assembler(base_address)
asm.process(code)
asm.link()
if labels_result is not None:
for label, offset in asm.getLabels():
labels_result[label] = base_address + offset
return binascii.hexlify(asm.getResult())
def allOpcodesTest():
import json
opcodes = json.load(open("Opcodes.json", "rt"))
for label in (False, True):
for prefix, codes in opcodes.items():
for num, op in codes.items():
if op['mnemonic'].startswith('ILLEGAL_') or op['mnemonic'] == 'PREFIX':
continue
params = []
postfix = ''
for o in op['operands']:
name = o['name']
if name == 'd16' or name == 'a16':
if label:
name = 'LABEL'
else:
name = '$0000'
if name == 'd8' or name == 'a8':
name = '$00'
if name == 'r8':
if label and num != '0xE8':
name = 'LABEL'
else:
name = '$00'
if name[-1] == 'H' and name[0].isnumeric():
name = '$' + name[:-1]
if o['immediate']:
params.append(name)
else:
params.append("[%s]" % (name))
if 'increment' in o and o['increment']:
postfix = 'I'
if 'decrement' in o and o['decrement']:
postfix = 'D'
code = op["mnemonic"] + postfix + " " + ", ".join(params)
code = code.strip()
try:
data = ASM("LABEL:\n%s" % (code), 0x0000)
if prefix == 'cbprefixed':
assert data[0:2] == b'cb'
data = data[2:]
assert data[0:2] == num[2:].encode('ascii').lower(), data[0:2] + b"!=" + num[2:].encode('ascii').lower()
except Exception as e:
print("%s\t\t|%r|\t%s" % (code, e, num))
print(op)
if __name__ == "__main__":
#allOpcodesTest()
const("CONST1", 1)
const("CONST2", 2)
ASM("""
ld a, (123)
ld hl, $1234 + 456
ld hl, $1234 + CONST1
ld hl, label
ld hl, label.end - label
ld c, label.end - label
label:
nop
.end:
""", 0)
ASM("""
jr label
label:
""")
assert ASM("db 1 + 2 * 3") == b'07'
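    # Hedged usage sketch (added): when a base address is given, ASM() can also
    # report resolved label addresses through labels_result.
    demo_labels = {}
    ASM("""
        entry:
            nop
            jp entry
    """, 0x4000, labels_result=demo_labels)
    assert demo_labels["ENTRY"] == 0x4000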
| 36.846442
| 125
| 0.46771
|
bb00b89d3986d3c9f0ed00ee1bc886cf9221a8a3
| 36,170
|
py
|
Python
|
salt/modules/nilrt_ip.py
|
jasonarewhy/salt
|
4e54f213c511586c9232e961adf9afafd945f9f2
|
[
"Apache-2.0"
] | 1
|
2016-08-21T21:19:12.000Z
|
2016-08-21T21:19:12.000Z
|
salt/modules/nilrt_ip.py
|
dubb-b/salt
|
20f23f5c98f600f557b3eff6e67543ae4c7a14d7
|
[
"Apache-2.0"
] | 2
|
2019-03-06T20:43:44.000Z
|
2019-04-10T23:56:02.000Z
|
salt/modules/nilrt_ip.py
|
dubb-b/salt
|
20f23f5c98f600f557b3eff6e67543ae4c7a14d7
|
[
"Apache-2.0"
] | 1
|
2020-04-10T20:18:40.000Z
|
2020-04-10T20:18:40.000Z
|
# -*- coding: utf-8 -*-
'''
The networking module for NI Linux Real-Time distro
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import os
import re
# Import salt libs
import salt.exceptions
import salt.utils.files
import salt.utils.validate.net
# Import 3rd-party libs
# pylint: disable=import-error,redefined-builtin,no-name-in-module
from salt.ext.six.moves import map, range, configparser
from salt.ext import six
# pylint: enable=import-error,redefined-builtin,no-name-in-module
try:
import pyconnman
except ImportError:
pyconnman = None
try:
import dbus
except ImportError:
dbus = None
try:
import pyiface
from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING
except ImportError:
pyiface = None
try:
from requests.structures import CaseInsensitiveDict
except ImportError:
CaseInsensitiveDict = None
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'ip'
SERVICE_PATH = '/net/connman/service/'
INTERFACES_CONFIG = '/var/lib/connman/interfaces.config'
NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg'
INI_FILE = '/etc/natinst/share/ni-rt.ini'
_CONFIG_TRUE = ['yes', 'on', 'true', '1', True]
NIRTCFG_ETHERCAT = 'EtherCAT'
def _assume_condition(condition, err):
'''
Raise an exception if the condition is false
'''
if not condition:
raise RuntimeError(err)
def __virtual__():
'''
Confine this module to NI Linux Real-Time based distros
'''
try:
msg = 'The nilrt_ip module could not be loaded: unsupported OS family'
_assume_condition(__grains__['os_family'] == 'NILinuxRT', msg)
_assume_condition(CaseInsensitiveDict, 'The python package request is not installed')
_assume_condition(pyiface, 'The python pyiface package is not installed')
if __grains__['lsb_distrib_id'] != 'nilrt':
_assume_condition(pyconnman, 'The python package pyconnman is not installed')
_assume_condition(dbus, 'The python DBus package is not installed')
_assume_condition(_get_state() != 'offline', 'Connman is not running')
except RuntimeError as exc:
return False, str(exc)
return __virtualname__
def _get_state():
'''
Returns the state of connman
'''
try:
return pyconnman.ConnManager().get_property('State')
except KeyError:
return 'offline'
except dbus.DBusException as exc:
raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc))
def _get_technologies():
'''
Returns the technologies of connman
'''
tech = ''
technologies = pyconnman.ConnManager().get_technologies()
for path, params in technologies:
tech += '{0}\n\tName = {1}\n\tType = {2}\n\tPowered = {3}\n\tConnected = {4}\n'.format(
path, params['Name'], params['Type'], params['Powered'] == 1, params['Connected'] == 1)
return tech
def _get_services():
'''
Returns a list with all connman services
'''
serv = []
services = pyconnman.ConnManager().get_services()
for path, _ in services:
serv.append(six.text_type(path[len(SERVICE_PATH):]))
return serv
def _connected(service):
'''
Verify if a connman service is connected
'''
state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State')
return state == 'online' or state == 'ready'
def _space_delimited_list(value):
'''
validate that a value contains one or more space-delimited values
'''
if isinstance(value, six.string_types):
items = value.split(' ')
valid = items and all(items)
else:
valid = hasattr(value, '__iter__') and (value != [])
if valid:
return True, 'space-delimited string'
return False, '{0} is not a valid list.\n'.format(value)
def _validate_ipv4(value):
'''
validate ipv4 values
'''
if len(value) == 3:
if not salt.utils.validate.net.ipv4_addr(value[0].strip()):
return False, 'Invalid ip address: {0} for ipv4 option'.format(value[0])
if not salt.utils.validate.net.netmask(value[1].strip()):
return False, 'Invalid netmask: {0} for ipv4 option'.format(value[1])
if not salt.utils.validate.net.ipv4_addr(value[2].strip()):
return False, 'Invalid gateway: {0} for ipv4 option'.format(value[2])
else:
return False, 'Invalid value: {0} for ipv4 option'.format(value)
return True, ''
def _interface_to_service(iface):
'''
    return the corresponding service for the given interface if it exists, otherwise None
'''
for _service in _get_services():
service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service))
if service_info.get_property('Ethernet')['Interface'] == iface:
return _service
return None
def _get_service_info(service):
'''
return details about given connman service
'''
service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, service))
data = {
'label': service,
'wireless': service_info.get_property('Type') == 'wifi',
'connectionid': six.text_type(service_info.get_property('Ethernet')['Interface']),
'hwaddr': six.text_type(service_info.get_property('Ethernet')['Address'])
}
state = service_info.get_property('State')
if state == 'ready' or state == 'online':
data['up'] = True
data['ipv4'] = {
'gateway': '0.0.0.0'
}
ipv4 = 'IPv4'
if service_info.get_property('IPv4')['Method'] == 'manual':
ipv4 += '.Configuration'
ipv4_info = service_info.get_property(ipv4)
for info in ['Method', 'Address', 'Netmask', 'Gateway']:
value = ipv4_info.get(info)
if value is None:
log.warning('Unable to get IPv4 %s for service %s\n', info, service)
continue
if info == 'Method':
info = 'requestmode'
if value == 'dhcp':
value = 'dhcp_linklocal'
elif value in ('manual', 'fixed'):
value = 'static'
data['ipv4'][info.lower()] = six.text_type(value)
ipv6_info = service_info.get_property('IPv6')
for info in ['Address', 'Prefix', 'Gateway']:
value = ipv6_info.get(info)
if value is None:
log.warning('Unable to get IPv6 %s for service %s\n', info, service)
continue
if 'ipv6' not in data:
data['ipv6'] = {}
data['ipv6'][info.lower()] = [six.text_type(value)]
nameservers = []
for nameserver_prop in service_info.get_property('Nameservers'):
nameservers.append(six.text_type(nameserver_prop))
data['ipv4']['dns'] = nameservers
else:
data['up'] = False
data['ipv4'] = {
'requestmode': 'disabled'
}
data['ipv4']['supportedrequestmodes'] = [
'dhcp_linklocal',
'disabled',
'static'
]
return data
def _get_dns_info():
'''
return dns list
'''
dns_list = []
try:
with salt.utils.files.fopen('/etc/resolv.conf', 'r+') as dns_info:
lines = dns_info.readlines()
for line in lines:
if 'nameserver' in line:
dns = line.split()[1].strip()
if dns not in dns_list:
dns_list.append(dns)
except IOError:
        log.warning('Could not read DNS servers from /etc/resolv.conf\n')
return dns_list
def _remove_quotes(value):
'''
Remove leading and trailing double quotes if they exist.
'''
# nirtcfg writes values with quotes
if len(value) > 1 and value[0] == value[-1] == '\"':
value = value[1:-1]
return value
def _load_config(section, options, default_value='', filename=INI_FILE):
'''
Get values for some options and a given section from a config file.
:param section: Section Name
:param options: List of options
:param default_value: Default value if an option doesn't have a value. Default is empty string.
:param filename: config file. Default is INI_FILE.
:return:
'''
results = {}
if not options:
return results
with salt.utils.files.fopen(filename, 'r') as config_file:
config_parser = configparser.RawConfigParser(dict_type=CaseInsensitiveDict)
config_parser.readfp(config_file)
for option in options:
results[option] = _remove_quotes(config_parser.get(section, option)) \
if config_parser.has_option(section, option) else default_value
return results
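# Illustrative sketch (added; the section content is hypothetical but follows the
# quoting convention handled by _remove_quotes above). Given an ni-rt.ini fragment
# such as:
#
#   [eth0]
#   dhcpenabled="1"
#   linklocalenabled="1"
#
# _load_config('eth0', ['dhcpenabled', 'linklocalenabled']) would return
# {'dhcpenabled': '1', 'linklocalenabled': '1'}.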
def _get_request_mode_info(interface):
'''
return requestmode for given interface
'''
settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1)
link_local_enabled = int(settings['linklocalenabled'])
dhcp_enabled = int(settings['dhcpenabled'])
if dhcp_enabled == 1:
return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only'
else:
if link_local_enabled == 1:
return 'linklocal_only'
if link_local_enabled == 0:
return 'static'
# some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables
# when selecting "DHCP or Link Local" from MAX, so return it by default to avoid
# having the requestmode "None" because none of the conditions above matched.
return 'dhcp_linklocal'
def _get_adapter_mode_info(interface):
'''
return adaptermode for given interface
'''
mode = _load_config(interface, ['mode'])['mode'].lower()
return mode if mode in ['disabled', 'ethercat'] else 'tcpip'
def _get_possible_adapter_modes(interface, blacklist):
'''
Return possible adapter modes for a given interface using a blacklist.
:param interface: interface name
:param blacklist: given blacklist
:return: list of possible adapter modes
'''
adapter_modes = []
protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower()
sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface))
with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file:
uevent_lines = uevent_file.readlines()
uevent_devtype = ""
for line in uevent_lines:
if line.startswith("DEVTYPE="):
uevent_devtype = line.split('=')[1].strip()
break
for adapter_mode in blacklist:
if adapter_mode == '_':
continue
value = blacklist.get(adapter_mode, {})
if value.get('additional_protocol') and adapter_mode not in protocols:
continue
if interface not in value['name'] \
and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or
(blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype)
for iface_type in value['type']):
adapter_modes += [adapter_mode]
return adapter_modes
def _get_static_info(interface):
'''
Return information about an interface from config file.
:param interface: interface label
'''
data = {
'connectionid': interface.name,
'label': interface.name,
'hwaddr': interface.hwaddr[:-1],
'up': False,
'ipv4': {
'supportedrequestmodes': ['dhcp_linklocal', 'disabled', 'static'],
'requestmode': 'dhcp_linklocal'
},
'wireless': False
}
hwaddr_section_number = ''.join(data['hwaddr'].split(':'))
if os.path.exists(INTERFACES_CONFIG):
information = _load_config('service_' + hwaddr_section_number, ['IPv4', 'Nameservers', 'IPv4.method'],
filename=INTERFACES_CONFIG)
if information['IPv4.method'] == 'manual' and information['IPv4'] != '':
ipv4_information = information['IPv4'].split('/')
data['ipv4']['address'] = ipv4_information[0]
data['ipv4']['dns'] = '' if information['Nameservers'] == '\'\'' else information['Nameservers'].split(',')
data['ipv4']['netmask'] = ipv4_information[1]
data['ipv4']['gateway'] = ipv4_information[2]
data['ipv4']['requestmode'] = 'static'
elif information['IPv4'] == 'off':
data['ipv4']['requestmode'] = 'disabled'
return data
def _get_base_interface_info(interface):
'''
return base details about given interface
'''
blacklist = {
'tcpip': {
'name': [],
'type': [],
'additional_protocol': False
},
'disabled': {
'name': ['eth0'],
'type': ['gadget'],
'additional_protocol': False
},
'ethercat': {
'name': ['eth0'],
'type': ['gadget', 'usb', 'wlan'],
'additional_protocol': True
},
'_': {
'usb': 'sys',
'gadget': 'uevent',
'wlan': 'uevent'
}
}
return {
'label': interface.name,
'connectionid': interface.name,
'supported_adapter_modes': _get_possible_adapter_modes(interface.name, blacklist),
'adapter_mode': _get_adapter_mode_info(interface.name),
'up': interface.flags & IFF_RUNNING != 0,
'ipv4': {
'supportedrequestmodes': ['dhcp_linklocal', 'dhcp_only', 'linklocal_only', 'static'],
'requestmode': _get_request_mode_info(interface.name)
},
'hwaddr': interface.hwaddr[:-1]
}
def _get_ethercat_interface_info(interface):
'''
return details about given ethercat interface
'''
base_information = _get_base_interface_info(interface)
base_information['ethercat'] = {
'masterid': _load_config(interface.name, ['MasterID'])['MasterID']
}
return base_information
def _get_tcpip_interface_info(interface):
'''
return details about given tcpip interface
'''
base_information = _get_base_interface_info(interface)
if base_information['ipv4']['requestmode'] == 'static':
settings = _load_config(interface.name, ['IP_Address', 'Subnet_Mask', 'Gateway', 'DNS_Address'])
base_information['ipv4']['address'] = settings['IP_Address']
base_information['ipv4']['netmask'] = settings['Subnet_Mask']
base_information['ipv4']['gateway'] = settings['Gateway']
base_information['ipv4']['dns'] = [settings['DNS_Address']]
elif base_information['up']:
base_information['ipv4']['address'] = interface.sockaddrToStr(interface.addr)
base_information['ipv4']['netmask'] = interface.sockaddrToStr(interface.netmask)
base_information['ipv4']['gateway'] = '0.0.0.0'
base_information['ipv4']['dns'] = _get_dns_info()
with salt.utils.files.fopen('/proc/net/route', 'r') as route_file:
pattern = re.compile(r'^{interface}\t[0]{{8}}\t([0-9A-Z]{{8}})'.format(interface=interface.name),
re.MULTILINE)
match = pattern.search(route_file.read())
iface_gateway_hex = None if not match else match.group(1)
if iface_gateway_hex is not None and len(iface_gateway_hex) == 8:
base_information['ipv4']['gateway'] = '.'.join([str(int(iface_gateway_hex[i:i + 2], 16))
for i in range(6, -1, -2)])
return base_information
def _get_interface_info(interface):
'''
return details about given interface
'''
adapter_mode = _get_adapter_mode_info(interface.name)
if adapter_mode == 'disabled':
return _get_base_interface_info(interface)
elif adapter_mode == 'ethercat':
return _get_ethercat_interface_info(interface)
return _get_tcpip_interface_info(interface)
def _dict_to_string(dictionary):
'''
converts a dictionary object into a list of strings
'''
ret = ''
for key, val in sorted(dictionary.items()):
if isinstance(val, dict):
for line in _dict_to_string(val):
ret += six.text_type(key) + '-' + line + '\n'
elif isinstance(val, list):
text = ' '.join([six.text_type(item) for item in val])
ret += six.text_type(key) + ': ' + text + '\n'
else:
ret += six.text_type(key) + ': ' + six.text_type(val) + '\n'
return ret.splitlines()
def _get_info(interface):
'''
Return information about an interface even if it's not associated with a service.
:param interface: interface label
'''
service = _interface_to_service(interface.name)
if service is not None:
return _get_service_info(service)
return _get_static_info(interface)
def get_interfaces_details():
'''
Get details about all the interfaces on the minion
:return: information about all interfaces omitting loopback
:rtype: dictionary
CLI Example:
.. code-block:: bash
salt '*' ip.get_interfaces_details
'''
_interfaces = [interface for interface in pyiface.getIfaces() if interface.flags & IFF_LOOPBACK == 0]
if __grains__['lsb_distrib_id'] == 'nilrt':
return {'interfaces': list(map(_get_interface_info, _interfaces))}
return {'interfaces': list(map(_get_info, _interfaces))}
def _change_state_legacy(interface, new_state):
'''
Enable or disable an interface on a legacy distro
Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.
:param interface: interface label
:param new_state: up or down
:return: True if the service was enabled, otherwise an exception will be thrown.
:rtype: bool
'''
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', 'TCPIP' if new_state == 'up' else 'Disabled')
if initial_mode == 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
else:
out = __salt__['cmd.run_all']('ip link set {0} {1}'.format(interface, new_state))
if out['retcode'] != 0:
msg = 'Couldn\'t {0} interface {1}. Error: {2}'.format('enable' if new_state == 'up' else 'disable',
interface, out['stderr'])
raise salt.exceptions.CommandExecutionError(msg)
return True
def _change_dhcp_config(interface, enable_dhcp=True, filename=INTERFACES_CONFIG):
'''
Enable or disable dhcp for an interface which isn't a service (in a config file)
:param interface: interface label
:param enable_dhcp: True to enable dhcp and False to disable dhcp. Default is True
:param filename: Config file name. Default is INTERFACES_CONFIG.
'''
parser = configparser.ConfigParser()
parser.optionxform = str
if os.path.exists(filename):
try:
with salt.utils.files.fopen(filename, 'r') as config_file:
parser.readfp(config_file)
except configparser.MissingSectionHeaderError:
pass
interface = pyiface.Interface(name=interface)
hwaddr = interface.hwaddr[:-1]
hwaddr_section_number = ''.join(hwaddr.split(':'))
if parser.has_section('service_{0}'.format(hwaddr_section_number)):
parser.remove_section('service_{0}'.format(hwaddr_section_number))
parser.add_section('service_{0}'.format(hwaddr_section_number))
parser.set('service_{0}'.format(hwaddr_section_number), 'MAC', hwaddr)
parser.set('service_{0}'.format(hwaddr_section_number), 'Name', 'ethernet_cable_{0}'.format(hwaddr_section_number))
parser.set('service_{0}'.format(hwaddr_section_number), 'Type', 'ethernet')
if enable_dhcp:
parser.set('service_{0}'.format(hwaddr_section_number), 'IPv4.Method', 'dhcp')
parser.set('service_{0}'.format(hwaddr_section_number), 'AutoConnect', 'true')
parser.set('service_{0}'.format(hwaddr_section_number), 'Nameservers', '\'\'')
else:
parser.set('service_{0}'.format(hwaddr_section_number), 'IPv4', 'off')
with salt.utils.files.fopen(filename, 'w') as config_file:
parser.write(config_file)
return True
def _change_state(interface, new_state):
'''
Enable or disable an interface
Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.
:param interface: interface label
:param new_state: up or down
:return: True if the service was enabled, otherwise an exception will be thrown.
:rtype: bool
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
return _change_state_legacy(interface, new_state)
if interface in [x.name for x in pyiface.getIfaces()]:
return _change_dhcp_config(interface) if new_state == 'up' else _change_dhcp_config(interface, False)
raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))
def up(interface, iface_type=None): # pylint: disable=invalid-name,unused-argument
'''
Enable the specified interface
Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.
:param str interface: interface label
:return: True if the service was enabled, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.up interface-label
'''
return _change_state(interface, 'up')
def enable(interface):
'''
Enable the specified interface
Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot.
:param str interface: interface label
:return: True if the service was enabled, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.enable interface-label
'''
return up(interface)
def down(interface, iface_type=None): # pylint: disable=unused-argument
'''
Disable the specified interface
Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot.
:param str interface: interface label
:return: True if the service was disabled, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.down interface-label
'''
return _change_state(interface, 'down')
def disable(interface):
'''
Disable the specified interface
Change adapter mode to Disabled. If previous adapter mode was EtherCAT, the target will need reboot.
:param str interface: interface label
:return: True if the service was disabled, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.disable interface-label
'''
return down(interface)
def _save_config(section, token, value):
'''
Helper function to persist a configuration in the ini file
'''
cmd = NIRTCFG_PATH
cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value)
if __salt__['cmd.run_all'](cmd)['retcode'] != 0:
exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section)
raise salt.exceptions.CommandExecutionError(exc_msg)
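# Hedged illustration (section/token/value are made up, not from the module): for
# _save_config('eth0', 'Mode', 'TCPIP'), the command built above would look roughly like
#     <NIRTCFG_PATH> --set section=eth0,token='Mode',value='TCPIP'
# where NIRTCFG_PATH (defined earlier in this module) points at the nirtcfg utility.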
def set_ethercat(interface, master_id):
'''
Configure the specified adapter to use EtherCAT adapter mode. If the adapter was not already
in EtherCAT mode, the target will need a reboot; otherwise the call simply returns True.
:param interface: interface label
:param master_id: EtherCAT Master ID
:return: True if the settings were applied, otherwise an exception will be thrown.
CLI Example:
.. code-block:: bash
salt '*' ip.set_ethercat interface-label master-id
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', NIRTCFG_ETHERCAT)
_save_config(interface, 'MasterID', master_id)
if initial_mode != 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
return True
raise salt.exceptions.CommandExecutionError('EtherCAT is not supported')
def _restart(interface):
'''
Disable and enable an interface
'''
disable(interface)
enable(interface)
def set_dhcp_linklocal_all(interface):
'''
Configure the specified adapter to use DHCP with link-local fallback.
Change the adapter mode to TCP/IP. If the previous adapter mode was EtherCAT, the target will need a reboot.
:param str interface: interface label
:return: True if the settings were applied, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.set_dhcp_linklocal_all interface-label
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', 'TCPIP')
_save_config(interface, 'dhcpenabled', '1')
_save_config(interface, 'linklocalenabled', '1')
if initial_mode == 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
else:
_restart(interface)
return True
if interface in [x.name for x in pyiface.getIfaces()]:
return _change_dhcp_config(interface)
raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))
def set_dhcp_only_all(interface):
'''
Configure the specified adapter to use DHCP only.
Change the adapter mode to TCP/IP. If the previous adapter mode was EtherCAT, the target will need a reboot.
:param str interface: interface label
:return: True if the settings were applied, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.set_dhcp_only_all interface-label
'''
if not __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version')
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', 'TCPIP')
_save_config(interface, 'dhcpenabled', '1')
_save_config(interface, 'linklocalenabled', '0')
if initial_mode == 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
else:
_restart(interface)
return True
def set_linklocal_only_all(interface):
'''
Configure the specified adapter to use link-local addressing only.
Change the adapter mode to TCP/IP. If the previous adapter mode was EtherCAT, the target will need a reboot.
:param str interface: interface label
:return: True if the settings were applied, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.set_linklocal_only_all interface-label
'''
if not __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version')
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', 'TCPIP')
_save_config(interface, 'dhcpenabled', '0')
_save_config(interface, 'linklocalenabled', '1')
if initial_mode == 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
else:
_restart(interface)
return True
def _configure_static_interface(interface, **settings):
'''
Configure an interface that is not detected as a service by Connman (i.e. link is down)
:param interface: interface label
:param settings:
- ip
- netmask
- gateway
- dns
- name
:return: True if settings were applied successfully.
:rtype: bool
'''
interface = pyiface.Interface(name=interface)
parser = configparser.ConfigParser()
parser.optionxform = str
if os.path.exists(INTERFACES_CONFIG):
try:
with salt.utils.files.fopen(INTERFACES_CONFIG, 'r') as config_file:
parser.readfp(config_file)
except configparser.MissingSectionHeaderError:
pass
hwaddr = interface.hwaddr[:-1]
hwaddr_section_number = ''.join(hwaddr.split(':'))
if parser.has_section('service_{0}'.format(hwaddr_section_number)):
parser.remove_section('service_{0}'.format(hwaddr_section_number))
parser.add_section('service_{0}'.format(hwaddr_section_number))
ip_address = settings.get('ip', '0.0.0.0')
netmask = settings.get('netmask', '0.0.0.0')
gateway = settings.get('gateway', '0.0.0.0')
dns_servers = settings.get('dns', '\'\'')
name = settings.get('name', 'ethernet_cable_{0}'.format(hwaddr_section_number))
parser.set('service_{0}'.format(hwaddr_section_number), 'IPv4', '{0}/{1}/{2}'.
format(ip_address, netmask, gateway))
parser.set('service_{0}'.format(hwaddr_section_number), 'Nameservers', dns_servers)
parser.set('service_{0}'.format(hwaddr_section_number), 'Name', name)
parser.set('service_{0}'.format(hwaddr_section_number), 'MAC', hwaddr)
parser.set('service_{0}'.format(hwaddr_section_number), 'Type', 'ethernet')
parser.set('service_{0}'.format(hwaddr_section_number), 'IPv4.method', 'manual')
with salt.utils.files.fopen(INTERFACES_CONFIG, 'w') as config_file:
parser.write(config_file)
return True
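# Hedged illustration (addresses and MAC are made up): for a static configuration, the
# section written above into INTERFACES_CONFIG would look roughly like
#     [service_00802faabbcc]
#     IPv4 = 192.168.1.10/255.255.255.0/192.168.1.1
#     Nameservers = 8.8.8.8
#     Name = ethernet_cable_00802faabbcc
#     MAC = 00:80:2f:aa:bb:cc
#     Type = ethernet
#     IPv4.method = manual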
def set_static_all(interface, address, netmask, gateway, nameservers=None):
'''
Configure the specified adapter to use manual (static) IPv4 settings.
Change the adapter mode to TCP/IP. If the previous adapter mode was EtherCAT, the target will need a reboot.
:param str interface: interface label
:param str address: ipv4 address
:param str netmask: ipv4 netmask
:param str gateway: ipv4 gateway
:param str nameservers: space-separated list of nameserver addresses (optional)
:return: True if the settings were applied, otherwise an exception will be thrown.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' ip.set_static_all interface-label address netmask gateway nameservers
'''
validate, msg = _validate_ipv4([address, netmask, gateway])
if not validate:
raise salt.exceptions.CommandExecutionError(msg)
if nameservers:
validate, msg = _space_delimited_list(nameservers)
if not validate:
raise salt.exceptions.CommandExecutionError(msg)
if not isinstance(nameservers, list):
nameservers = nameservers.split(' ')
if __grains__['lsb_distrib_id'] == 'nilrt':
initial_mode = _get_adapter_mode_info(interface)
_save_config(interface, 'Mode', 'TCPIP')
_save_config(interface, 'dhcpenabled', '0')
_save_config(interface, 'linklocalenabled', '0')
_save_config(interface, 'IP_Address', address)
_save_config(interface, 'Subnet_Mask', netmask)
_save_config(interface, 'Gateway', gateway)
if nameservers:
_save_config(interface, 'DNS_Address', nameservers[0])
if initial_mode == 'ethercat':
__salt__['system.set_reboot_required_witnessed']()
else:
_restart(interface)
return True
if interface in [x.name for x in pyiface.getIfaces()]:
return _configure_static_interface(interface, **{'ip': address,
'dns': ','.join(nameservers) if nameservers else '\'\'',
'netmask': netmask, 'gateway': gateway})
raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))
def get_interface(iface):
'''
Returns details about the given interface.
CLI Example:
.. code-block:: bash
salt '*' ip.get_interface eth0
'''
_interfaces = get_interfaces_details()
for _interface in _interfaces['interfaces']:
if _interface['connectionid'] == iface:
return _dict_to_string(_interface)
return None
def build_interface(iface, iface_type, enabled, **settings):
'''
Build an interface script for a network interface.
CLI Example:
.. code-block:: bash
salt '*' ip.build_interface eth0 eth <settings>
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version.')
if iface_type != 'eth':
raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type))
if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp
set_dhcp_linklocal_all(iface)
elif settings['proto'] != 'static':
exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto'])
raise salt.exceptions.CommandExecutionError(exc_msg)
else:
address = settings['ipaddr']
netmask = settings['netmask']
gateway = settings['gateway']
dns = []
for key, val in six.iteritems(settings):
if 'dns' in key or 'domain' in key:
dns += val
set_static_all(iface, address, netmask, gateway, dns)
if enabled:
up(iface)
return get_interface(iface)
def build_network_settings(**settings):
'''
Build the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.build_network_settings <settings>
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version.')
changes = []
if 'networking' in settings:
if settings['networking'] in _CONFIG_TRUE:
__salt__['service.enable']('connman')
else:
__salt__['service.disable']('connman')
if 'hostname' in settings:
new_hostname = settings['hostname'].split('.', 1)[0]
settings['hostname'] = new_hostname
old_hostname = __salt__['network.get_hostname']()  # call the function to get the current hostname
if new_hostname != old_hostname:
__salt__['network.mod_hostname'](new_hostname)
changes.append('hostname={0}'.format(new_hostname))
return changes
def get_network_settings():
'''
Return the contents of the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.get_network_settings
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version.')
settings = []
networking = 'no' if _get_state() == 'offline' else 'yes'
settings.append('networking={0}'.format(networking))
hostname = __salt__['network.get_hostname']()
settings.append('hostname={0}'.format(hostname))
return settings
def apply_network_settings(**settings):
'''
Apply global network configuration.
CLI Example:
.. code-block:: bash
salt '*' ip.apply_network_settings
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version.')
if 'require_reboot' not in settings:
settings['require_reboot'] = False
if 'apply_hostname' not in settings:
settings['apply_hostname'] = False
hostname_res = True
if settings['apply_hostname'] in _CONFIG_TRUE:
if 'hostname' in settings:
hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
else:
log.warning(
'The network state sls is trying to apply hostname '
'changes but no hostname is defined.'
)
hostname_res = False
res = True
if settings['require_reboot'] in _CONFIG_TRUE:
log.warning(
'The network state sls is requiring a reboot of the system to '
'properly apply network configuration.'
)
res = True
else:
stop = __salt__['service.stop']('connman')
time.sleep(2)
res = stop and __salt__['service.start']('connman')
return hostname_res and res
| 34.513359
| 119
| 0.644401
|
c9d244aee0f51a2ff824a92d9e8c9252c5fc1944
| 5,400
|
py
|
Python
|
falcon/routing/util.py
|
jxub/falcon
|
7d6c06a9e8c8ec38a3cad2e5f29236b592e656a6
|
[
"Apache-2.0"
] | null | null | null |
falcon/routing/util.py
|
jxub/falcon
|
7d6c06a9e8c8ec38a3cad2e5f29236b592e656a6
|
[
"Apache-2.0"
] | null | null | null |
falcon/routing/util.py
|
jxub/falcon
|
7d6c06a9e8c8ec38a3cad2e5f29236b592e656a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routing utilities."""
import re
from falcon import COMBINED_METHODS, responders
class SuffixedMethodNotFoundError(Exception):
def __init__(self, message):
super(SuffixedMethodNotFoundError, self).__init__(message)
self.message = message
# NOTE(kgriffs): Published method; take care to avoid breaking changes.
def compile_uri_template(template):
"""Compile the given URI template string into a pattern matcher.
This function can be used to construct custom routing engines that
iterate through a list of possible routes, attempting to match
an incoming request against each route's compiled regular expression.
Each field is converted to a named group, so that when a match
is found, the fields can be easily extracted using
:py:meth:`re.MatchObject.groupdict`.
This function does not support the more flexible templating
syntax used in the default router. Only simple paths with bracketed
field expressions are recognized. For example::
/
/books
/books/{isbn}
/books/{isbn}/characters
/books/{isbn}/characters/{name}
Also, note that if the template contains a trailing slash character,
it will be stripped in order to normalize the routing logic.
Args:
template(str): The template to compile. Note that field names are
restricted to ASCII a-z, A-Z, and the underscore character.
Returns:
tuple: (template_field_names, template_regex)
"""
if not isinstance(template, str):
raise TypeError('uri_template is not a string')
if not template.startswith('/'):
raise ValueError("uri_template must start with '/'")
if '//' in template:
raise ValueError("uri_template may not contain '//'")
if template != '/' and template.endswith('/'):
template = template[:-1]
# template names should be able to start with A-Za-z
# but also contain 0-9_ in the remaining portion
expression_pattern = r'{([a-zA-Z]\w*)}'
# Get a list of field names
fields = set(re.findall(expression_pattern, template))
# Convert Level 1 var patterns to equivalent named regex groups
escaped = re.sub(r'[\.\(\)\[\]\?\*\+\^\|]', r'\\\g<0>', template)
pattern = re.sub(expression_pattern, r'(?P<\1>[^/]+)', escaped)
pattern = r'\A' + pattern + r'\Z'
return fields, re.compile(pattern, re.IGNORECASE)
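# Hedged usage sketch (template and path are made up, not from the Falcon docs):
# compile a simple template and match an incoming request path against it.
def _example_compile_uri_template():
    fields, pattern = compile_uri_template('/books/{isbn}/characters/{name}')
    match = pattern.match('/books/0765348780/characters/Hari')
    assert fields == {'isbn', 'name'}
    assert match.groupdict() == {'isbn': '0765348780', 'name': 'Hari'}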
def map_http_methods(resource, suffix=None):
"""Maps HTTP methods (e.g., GET, POST) to methods of a resource object.
Args:
resource: An object with *responder* methods, following the naming
convention *on_\\**, that correspond to each method the resource
supports. For example, if a resource supports GET and POST, it
should define ``on_get(self, req, resp)`` and
``on_post(self, req, resp)``.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc.
Returns:
dict: A mapping of HTTP methods to explicitly defined resource responders.
"""
method_map = {}
for method in COMBINED_METHODS:
try:
responder_name = 'on_' + method.lower()
if suffix:
responder_name += '_' + suffix
responder = getattr(resource, responder_name)
except AttributeError:
# resource does not implement this method
pass
else:
# Usually expect a method, but any callable will do
if callable(responder):
method_map[method] = responder
# If suffix is specified and doesn't map to any methods, raise an error
if suffix and not method_map:
raise SuffixedMethodNotFoundError('No responders found for the specified suffix')
return method_map
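# Hedged usage sketch (the resource class is made up): map_http_methods collects
# the on_* responders defined on a resource, optionally filtered by a suffix.
def _example_map_http_methods():
    class BookResource:
        def on_get(self, req, resp):
            pass
        def on_post(self, req, resp):
            pass
        def on_get_detail(self, req, resp):
            pass
    assert set(map_http_methods(BookResource())) == {'GET', 'POST'}
    assert set(map_http_methods(BookResource(), suffix='detail')) == {'GET'}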
def set_default_responders(method_map):
"""Maps HTTP methods not explicitly defined on a resource to default responders.
Args:
method_map: A dict with HTTP methods mapped to responders explicitly
defined in a resource.
"""
# Attach a resource for unsupported HTTP methods
allowed_methods = sorted(list(method_map.keys()))
if 'OPTIONS' not in method_map:
# OPTIONS itself is intentionally excluded from the Allow header
opt_responder = responders.create_default_options(allowed_methods)
method_map['OPTIONS'] = opt_responder
allowed_methods.append('OPTIONS')
na_responder = responders.create_method_not_allowed(allowed_methods)
for method in COMBINED_METHODS:
if method not in allowed_methods:
method_map[method] = na_responder
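# Hedged sketch (responder and methods are made up): after set_default_responders,
# OPTIONS gets a default responder and every unsupported method maps to a shared
# "405 Method Not Allowed" responder.
def _example_set_default_responders():
    def on_get(req, resp):
        pass
    method_map = {'GET': on_get}
    set_default_responders(method_map)
    assert method_map['GET'] is on_get
    assert 'OPTIONS' in method_map
    assert 'DELETE' in method_map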
| 34.83871
| 89
| 0.675556
|
c307a3782e4a075419eda1a56a2d8103b8298ae6
| 2,366
|
py
|
Python
|
rlpyt/experiments/configs/mujoco/pg/mujoco_ppo.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
rlpyt/experiments/configs/mujoco/pg/mujoco_ppo.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
rlpyt/experiments/configs/mujoco/pg/mujoco_ppo.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
import copy
from rlpyt.envs.wrappers import ClipActionsWrapper, RLPYT_WRAPPER_KEY
configs = dict()
env_args = dict()
env_args[RLPYT_WRAPPER_KEY] = [ClipActionsWrapper]
config = dict(
agent=dict(),
algo=dict(
discount=0.99,
learning_rate=3e-4,
clip_grad_norm=1e6,
entropy_loss_coeff=0.0,
gae_lambda=0.95,
minibatches=32,
epochs=10,
ratio_clip=0.2,
normalize_advantage=True,
linear_lr_schedule=True,
# bootstrap_timelimit=False,
),
env=dict(id="Hopper-v3", **env_args),
model=dict(normalize_observation=False),
optim=dict(),
runner=dict(
n_steps=1e6,
log_interval_steps=2048 * 10,
),
sampler=dict(
batch_T=2048,
batch_B=1,
max_decorrelation_steps=0,
),
)
configs["ppo_1M_serial"] = config
config = copy.deepcopy(configs["ppo_1M_serial"])
config["sampler"]["batch_B"] = 8
config["sampler"]["batch_T"] = 256
configs["ppo_1M_cpu"] = config
config = copy.deepcopy(configs["ppo_1M_serial"])
config["algo"]["minibatches"] = 1
config["algo"]["epochs"] = 32
configs["ppo_32ep_1mb"] = config
config = dict(
agent=dict(),
algo=dict(
discount=0.99,
learning_rate=3e-4,
clip_grad_norm=0.5,
value_loss_coeff=0.5,
entropy_loss_coeff=0.0,
gae_lambda=0.95,
minibatches=32,
epochs=10,
ratio_clip=0.2,
normalize_advantage=True,
linear_lr_schedule=False,
# bootstrap_timelimit=False,
clip_vf_loss=False,
normalize_rewards='return',
rew_clip=(-10,10),
rew_min_var=(1e-6)
),
env=dict(id="HalfCheetah-Directional-v0", **env_args),
model=dict(normalize_observation=True, baselines_init=True),
optim=dict(),
runner=dict(
seed=None,
n_steps=1e6,
log_interval_steps=1e3,
transfer=True,
transfer_iter=150,
log_traj_window=10
),
sampler=dict(
batch_T=256,
batch_B=8,
max_decorrelation_steps=100,
),
)
configs["ppo_1M_halfcheetahtransfer"] = config
config = copy.deepcopy(configs["ppo_1M_halfcheetahtransfer"])
config["env"] = dict(id='TMaze-TwoGoal-v0', **env_args)
config["algo"]["normalize_rewards"] = None
config["runner"]["log_traj_window"] = 20
configs["ppo_1M_TMaze"] = config
| 26
| 69
| 0.633136
|
1a33e4cacf6da32d0fea895ef4bf0176cbf0a4a4
| 8,919
|
py
|
Python
|
alipay/aop/api/domain/AlipayAssetCardNewtemplateCreateModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayAssetCardNewtemplateCreateModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayAssetCardNewtemplateCreateModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CardFundInfo import CardFundInfo
from alipay.aop.api.domain.CardCreditInfo import CardCreditInfo
class AlipayAssetCardNewtemplateCreateModel(object):
def __init__(self):
self._account_model = None
self._assets_code = None
self._biz_from = None
self._card_fund_infos = None
self._card_model = None
self._card_name = None
self._creator = None
self._credit_info = None
self._extend_info = None
self._operator = None
self._out_biz_no = None
self._partner_id = None
self._period_type = None
self._product_code = None
self._settle_user_id = None
@property
def account_model(self):
return self._account_model
@account_model.setter
def account_model(self, value):
self._account_model = value
@property
def assets_code(self):
return self._assets_code
@assets_code.setter
def assets_code(self, value):
self._assets_code = value
@property
def biz_from(self):
return self._biz_from
@biz_from.setter
def biz_from(self, value):
self._biz_from = value
@property
def card_fund_infos(self):
return self._card_fund_infos
@card_fund_infos.setter
def card_fund_infos(self, value):
if isinstance(value, list):
self._card_fund_infos = list()
for i in value:
if isinstance(i, CardFundInfo):
self._card_fund_infos.append(i)
else:
self._card_fund_infos.append(CardFundInfo.from_alipay_dict(i))
@property
def card_model(self):
return self._card_model
@card_model.setter
def card_model(self, value):
self._card_model = value
@property
def card_name(self):
return self._card_name
@card_name.setter
def card_name(self, value):
self._card_name = value
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def credit_info(self):
return self._credit_info
@credit_info.setter
def credit_info(self, value):
if isinstance(value, CardCreditInfo):
self._credit_info = value
else:
self._credit_info = CardCreditInfo.from_alipay_dict(value)
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
self._extend_info = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
@property
def period_type(self):
return self._period_type
@period_type.setter
def period_type(self, value):
self._period_type = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def settle_user_id(self):
return self._settle_user_id
@settle_user_id.setter
def settle_user_id(self, value):
self._settle_user_id = value
def to_alipay_dict(self):
params = dict()
if self.account_model:
if hasattr(self.account_model, 'to_alipay_dict'):
params['account_model'] = self.account_model.to_alipay_dict()
else:
params['account_model'] = self.account_model
if self.assets_code:
if hasattr(self.assets_code, 'to_alipay_dict'):
params['assets_code'] = self.assets_code.to_alipay_dict()
else:
params['assets_code'] = self.assets_code
if self.biz_from:
if hasattr(self.biz_from, 'to_alipay_dict'):
params['biz_from'] = self.biz_from.to_alipay_dict()
else:
params['biz_from'] = self.biz_from
if self.card_fund_infos:
if isinstance(self.card_fund_infos, list):
for i in range(0, len(self.card_fund_infos)):
element = self.card_fund_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.card_fund_infos[i] = element.to_alipay_dict()
if hasattr(self.card_fund_infos, 'to_alipay_dict'):
params['card_fund_infos'] = self.card_fund_infos.to_alipay_dict()
else:
params['card_fund_infos'] = self.card_fund_infos
if self.card_model:
if hasattr(self.card_model, 'to_alipay_dict'):
params['card_model'] = self.card_model.to_alipay_dict()
else:
params['card_model'] = self.card_model
if self.card_name:
if hasattr(self.card_name, 'to_alipay_dict'):
params['card_name'] = self.card_name.to_alipay_dict()
else:
params['card_name'] = self.card_name
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.credit_info:
if hasattr(self.credit_info, 'to_alipay_dict'):
params['credit_info'] = self.credit_info.to_alipay_dict()
else:
params['credit_info'] = self.credit_info
if self.extend_info:
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
if self.period_type:
if hasattr(self.period_type, 'to_alipay_dict'):
params['period_type'] = self.period_type.to_alipay_dict()
else:
params['period_type'] = self.period_type
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.settle_user_id:
if hasattr(self.settle_user_id, 'to_alipay_dict'):
params['settle_user_id'] = self.settle_user_id.to_alipay_dict()
else:
params['settle_user_id'] = self.settle_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayAssetCardNewtemplateCreateModel()
if 'account_model' in d:
o.account_model = d['account_model']
if 'assets_code' in d:
o.assets_code = d['assets_code']
if 'biz_from' in d:
o.biz_from = d['biz_from']
if 'card_fund_infos' in d:
o.card_fund_infos = d['card_fund_infos']
if 'card_model' in d:
o.card_model = d['card_model']
if 'card_name' in d:
o.card_name = d['card_name']
if 'creator' in d:
o.creator = d['creator']
if 'credit_info' in d:
o.credit_info = d['credit_info']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'operator' in d:
o.operator = d['operator']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'partner_id' in d:
o.partner_id = d['partner_id']
if 'period_type' in d:
o.period_type = d['period_type']
if 'product_code' in d:
o.product_code = d['product_code']
if 'settle_user_id' in d:
o.settle_user_id = d['settle_user_id']
return o
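# Hedged usage sketch (field values are made up): from_alipay_dict() and
# to_alipay_dict() round-trip a plain dict payload for this model.
def _example_asset_card_roundtrip():
    payload = {
        'card_name': 'demo card',
        'creator': '2088000000000000',
        'out_biz_no': 'OUT-0001',
        'period_type': 'FOREVER',
    }
    model = AlipayAssetCardNewtemplateCreateModel.from_alipay_dict(payload)
    assert model.card_name == 'demo card'
    assert model.to_alipay_dict() == payload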
| 33.404494
| 82
| 0.595134
|
98ef2aff1c3ec72e627dbc374f8cb026d12e7d75
| 2,418
|
py
|
Python
|
database/db_connector.py
|
CcEeCcIi/DatabasePortfolio
|
e675fb8f16cffd911cb314704c39cc94fb6b128c
|
[
"MIT"
] | null | null | null |
database/db_connector.py
|
CcEeCcIi/DatabasePortfolio
|
e675fb8f16cffd911cb314704c39cc94fb6b128c
|
[
"MIT"
] | null | null | null |
database/db_connector.py
|
CcEeCcIi/DatabasePortfolio
|
e675fb8f16cffd911cb314704c39cc94fb6b128c
|
[
"MIT"
] | null | null | null |
#code from OSU Flask Tutorial
import MySQLdb
import os
from dotenv import load_dotenv, find_dotenv
# Load our environment variables from the .env file in the root of our project.
load_dotenv(find_dotenv())
# Set the variables in our application with those environment variables
host = os.environ.get("340DBHOST")
user = os.environ.get("340DBUSER")
passwd = os.environ.get("340DBPW")
db = os.environ.get("340DB")
def connect_to_database(host = host, user = user, passwd = passwd, db = db):
'''
connects to a database and returns a database objects
'''
db_connection = MySQLdb.connect(host,user,passwd,db)
return db_connection
def execute_query(db_connection = None, query = None, query_params = ()):
'''
executes a given SQL query on the given db connection and returns a Cursor object
db_connection: a MySQLdb connection object created by connect_to_database()
query: string containing SQL query
returns: A Cursor object as specified at https://www.python.org/dev/peps/pep-0249/#cursor-objects.
You need to run .fetchall() or .fetchone() on that object to actually access the results.
'''
if db_connection is None:
print("No connection to the database found! Have you called connect_to_database() first?")
return None
if query is None or len(query.strip()) == 0:
print("query is empty! Please pass a SQL query in query")
return None
print("Executing %s with %s" % (query, query_params));
# Create a cursor to execute query. Why? Because apparently they optimize execution by retaining a reference according to PEP0249
cursor = db_connection.cursor(MySQLdb.cursors.DictCursor)
'''
params = tuple()
#create a tuple of paramters to send with the query
for q in query_params:
params = params + (q)
'''
#TODO: Sanitize the query before executing it!!!
cursor.execute(query, query_params)
# this will actually commit any changes to the database. without this no
# changes will be committed!
db_connection.commit();
return cursor
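# Hedged example (the column name is an assumption based on the sample table used
# below): passing query_params lets MySQLdb escape the values instead of relying on
# string formatting.
def _example_parameterized_query():
    db_connection = connect_to_database()
    query = "SELECT * FROM bsg_people WHERE fname = %s;"
    cursor = execute_query(db_connection, query, ('William',))
    return cursor.fetchall()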
if __name__ == '__main__':
print("Executing a sample query on the database using the credentials from db_credentials.py")
db = connect_to_database()
query = "SELECT * from bsg_people;"
results = execute_query(db, query);
print("Printing results of %s" % query)
for r in results.fetchall():
print(r)
| 36.636364
| 133
| 0.705542
|
53d41189ac6415087e44fdad1fc2c8ff236032a5
| 6,753
|
py
|
Python
|
converter/extract_fea.py
|
shijun18/XunFei_Classifier
|
f1331f1ada5b897656f7b3161b000c29e70e450f
|
[
"MIT"
] | 12
|
2021-07-14T13:16:07.000Z
|
2022-01-27T11:57:50.000Z
|
converter/extract_fea.py
|
shijun18/XunFei_Classifier
|
f1331f1ada5b897656f7b3161b000c29e70e450f
|
[
"MIT"
] | null | null | null |
converter/extract_fea.py
|
shijun18/XunFei_Classifier
|
f1331f1ada5b897656f7b3161b000c29e70e450f
|
[
"MIT"
] | 2
|
2021-06-30T07:51:36.000Z
|
2021-12-11T07:46:39.000Z
|
import os
import numpy as np
import pandas as pd
import librosa
import random
def add_noise(wav,factor):
wav_n = wav + factor*np.random.normal(0,1,len(wav))
return wav_n
def time_shifting(wav,sr,factor):
wav_roll = np.roll(wav,int(sr/factor))
return wav_roll
def time_stretch(wav,factor):
wav_stch = librosa.effects.time_stretch(wav,factor)
return wav_stch
def pitch_shifting(wav,sr,n_steps=-5):
wav_pitch_sf = librosa.effects.pitch_shift(wav,sr,n_steps=n_steps)
return wav_pitch_sf
def random_aug(wav,sr):
aug_list = ['add_noise','time_shifting','time_stretch','pitch_shifting',None]
aug_op = random.choice(aug_list)
if aug_op == 'add_noise':
factor = random.choice(range(8,12,2))
factor *= 0.001
aug_wav = add_noise(wav,factor)
elif aug_op == 'time_shifting':
factor = random.choice(range(5,20,5))
aug_wav = time_shifting(wav,sr,factor)
elif aug_op == 'time_stretch':
factor = random.choice(range(5,20,5))
factor *= 0.1
aug_wav = time_stretch(wav,factor)
elif aug_op == 'pitch_shifting':
factor = random.choice(range(-10,10,5))
aug_wav = pitch_shifting(wav,sr,n_steps=factor)
else:
aug_wav = wav
return aug_wav
def extract_audio_fea_aug(input_path,save_path,aug_times=None,label_flag=False):
assert aug_times is not None
if label_flag:
info = {
'id':[],
'label':[]
}
else:
info = {
'id':[]
}
for item in os.scandir(input_path):
y,sr=librosa.load(item.path,sr=44100,mono=True,duration=min(30,librosa.get_duration(filename=item.path)))
for aug in range(aug_times):
info['id'].append(f'{str(aug)}_{item.name}')
if label_flag:
info['label'].append(os.path.basename(input_path))
y = random_aug(y,sr)
S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,n_fft=2048, hop_length=1024,fmax=16000)
fea_list = {
'mfcc':librosa.feature.mfcc(y, sr, S=librosa.power_to_db(S),n_mfcc=40),# mfcc 40*T
'energy':np.sum(np.square(abs(librosa.stft(y,n_fft=2048,hop_length=1024))),0), # 1*T
'chroma_stft':librosa.feature.chroma_stft(y=y, sr=sr,n_fft=2048, hop_length=1024), # 12*T
'spec_cent':librosa.feature.spectral_centroid(y=y, sr=sr,n_fft=2048, hop_length=1024), # 1*T
'spec_bw':librosa.feature.spectral_bandwidth(y=y, sr=sr,n_fft=2048, hop_length=1024), # 1*T
'rolloff':librosa.feature.spectral_rolloff(y=y, sr=sr,n_fft=2048, hop_length=1024),# 1*T
'zcr':librosa.feature.zero_crossing_rate(y,hop_length=1024)
}
for fea in fea_list.keys():
fea_val = fea_list[fea]
if len(fea_val.shape) == 1:
fea_val = np.expand_dims(fea_val,axis=0)
avg_val = np.mean(fea_val,axis=1)
max_val = np.max(fea_val,axis=1)
for i in range(len(avg_val)):
if f'{fea}_avg_{str(i)}' not in info.keys():
info[f'{fea}_avg_{str(i)}'] = []
if f'{fea}_max_{str(i)}' not in info.keys():
info[f'{fea}_max_{str(i)}'] = []
info[f'{fea}_avg_{str(i)}'].append(avg_val[i])
info[f'{fea}_max_{str(i)}'].append(max_val[i])
csv_file = pd.DataFrame(data=info)
csv_file.to_csv(save_path,index=False)
def extract_audio_fea(input_path,save_path,label_flag=False):
if label_flag:
info = {
'id':[],
'label':[]
}
else:
info = {
'id':[]
}
for item in os.scandir(input_path):
y,sr=librosa.load(item.path,sr=44100,mono=True,duration=min(30,librosa.get_duration(filename=item.path)))
info['id'].append(item.name)
if label_flag:
info['label'].append(os.path.basename(input_path))
S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,n_fft=2048, hop_length=1024,fmax=16000)
fea_list = {
'mfcc':librosa.feature.mfcc(y, sr, S=librosa.power_to_db(S),n_mfcc=40),# mfcc 40*T
'energy':np.sum(np.square(abs(librosa.stft(y,n_fft=2048,hop_length=1024))),0), # 1*T
'chroma_stft':librosa.feature.chroma_stft(y=y, sr=sr,n_fft=2048, hop_length=1024), # 12*T
'spec_cent':librosa.feature.spectral_centroid(y=y, sr=sr,n_fft=2048, hop_length=1024), # 1*T
'spec_bw':librosa.feature.spectral_bandwidth(y=y, sr=sr,n_fft=2048, hop_length=1024), # 1*T
'rolloff':librosa.feature.spectral_rolloff(y=y, sr=sr,n_fft=2048, hop_length=1024),# 1*T
'zcr':librosa.feature.zero_crossing_rate(y,hop_length=1024)
}
for fea in fea_list.keys():
fea_val = fea_list[fea]
if len(fea_val.shape) == 1:
fea_val = np.expand_dims(fea_val,axis=0)
avg_val = np.mean(fea_val,axis=1)
max_val = np.max(fea_val,axis=1)
for i in range(len(avg_val)):
if f'{fea}_avg_{str(i)}' not in info.keys():
info[f'{fea}_avg_{str(i)}'] = []
if f'{fea}_max_{str(i)}' not in info.keys():
info[f'{fea}_max_{str(i)}'] = []
info[f'{fea}_avg_{str(i)}'].append(avg_val[i])
info[f'{fea}_max_{str(i)}'].append(max_val[i])
csv_file = pd.DataFrame(data=info)
csv_file.to_csv(save_path,index=False)
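# Hedged illustration (not part of the original script): the CSV written above has one
# row per audio file and columns such as
#     id, [label,] mfcc_avg_0..mfcc_avg_39, mfcc_max_0..mfcc_max_39,
#     energy_avg_0, energy_max_0, chroma_stft_avg_0..11, chroma_stft_max_0..11, ...
# i.e. one *_avg_i and one *_max_i column per dimension of each extracted feature.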
if __name__ == '__main__':
# data_path = '../dataset/Covid19/audio/train/cough/Positive'
# save_path = '../dataset/Covid19/train_positive_aug.csv'
# extract_audio_fea_aug(data_path,save_path,aug_times=5,label_flag=True)
data_path = '../dataset/Covid19/audio/train/cough/Positive'
save_path = '../dataset/Covid19/train_positive.csv'
extract_audio_fea(data_path,save_path,label_flag=True)
# data_path = '../dataset/Covid19/audio/train/cough/Negative'
# save_path = '../dataset/Covid19/train_negative.csv'
# extract_audio_fea(data_path,save_path,label_flag=True)
# data_path = '../dataset/Covid19/audio/test'
# save_path = '../dataset/Covid19/test.csv'
# extract_audio_fea(data_path,save_path,False)
# csv_p = '../dataset/Covid19/train_positive_aug.csv'
csv_p = '../dataset/Covid19/train_positive.csv'
df_p = pd.read_csv(csv_p)
csv_n = '../dataset/Covid19/train_negative.csv'
df_n = pd.read_csv(csv_n)
cat_data = pd.concat([df_p, df_n], axis=0, ignore_index=True)
save_path = '../dataset/Covid19/train_raw.csv'
cat_data.to_csv(save_path,index=False)
| 40.680723
| 114
| 0.605509
|
dd4f9172d79c361592e2d1cee0e128e585b6b86f
| 4,886
|
py
|
Python
|
dataset/cocodataset.py
|
DAMONYLY/YOLOv3
|
fae4e9533de70d187e09b97ee8b74d06deb0f037
|
[
"MIT"
] | null | null | null |
dataset/cocodataset.py
|
DAMONYLY/YOLOv3
|
fae4e9533de70d187e09b97ee8b74d06deb0f037
|
[
"MIT"
] | null | null | null |
dataset/cocodataset.py
|
DAMONYLY/YOLOv3
|
fae4e9533de70d187e09b97ee8b74d06deb0f037
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import torch
from torch.utils.data import Dataset
import cv2
from pycocotools.coco import COCO
from utils.utils import *
class COCODataset(Dataset):
"""
COCO dataset class.
"""
def __init__(self, model_type, data_dir='COCO', json_file='instances_train2017.json',
name='train2017', img_size=416,
augmentation=None, min_size=1, debug=False):
"""
COCO dataset initialization. Annotation data are read into memory by the COCO API.
Args:
model_type (str): model name specified in config file
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
min_size (int): bounding boxes smaller than this are ignored
debug (bool): if True, only one data id is selected from the dataset
"""
self.data_dir = data_dir
self.json_file = json_file
self.model_type = model_type
# if name == 'train2017':
# self.coco = COCO(self.data_dir+'coco_2017_train'+self.json_file)
# else:
self.coco = COCO(self.data_dir + '/' + self.json_file)
self.ids = self.coco.getImgIds()
if debug:
self.ids = self.ids[1:2]
print("debug mode...", self.ids)
self.class_ids = sorted(self.coco.getCatIds())
self.name = name
self.max_labels = 50
self.img_size = img_size
self.min_size = min_size
self.lrflip = augmentation['LRFLIP']
self.jitter = augmentation['JITTER']
self.random_placing = augmentation['RANDOM_PLACING']
self.hue = augmentation['HUE']
self.saturation = augmentation['SATURATION']
self.exposure = augmentation['EXPOSURE']
self.random_distort = augmentation['RANDOM_DISTORT']
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up \
and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data. \
The shape is :math:`[self.max_labels, 5]`. \
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
id_ (int): same as the input index. Used for evaluation.
"""
id_ = self.ids[index]
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=None)
annotations = self.coco.loadAnns(anno_ids)
lrflip = False
if np.random.rand() > 0.5 and self.lrflip == True:
lrflip = True
# load image and preprocess
img_file = os.path.join(self.data_dir, self.name,
'{:012}'.format(id_) + '.jpg')
img = cv2.imread(img_file)
if self.json_file == 'instances_val5k.json' and img is None:
img_file = os.path.join(self.data_dir, 'train2017',
'{:012}'.format(id_) + '.jpg')
img = cv2.imread(img_file)
assert img is not None
img, info_img = preprocess(img, self.img_size, jitter=self.jitter,
random_placing=self.random_placing)
if self.random_distort:
img = random_distort(img, self.hue, self.saturation, self.exposure)
img = np.transpose(img / 255., (2, 0, 1))
if lrflip:
img = np.flip(img, axis=2).copy()
# load labels
labels = []
for anno in annotations:
if anno['bbox'][2] > self.min_size and anno['bbox'][3] > self.min_size:
labels.append([])
labels[-1].append(self.class_ids.index(anno['category_id']))
labels[-1].extend(anno['bbox'])
padded_labels = np.zeros((self.max_labels, 5))
if len(labels) > 0:
labels = np.stack(labels)
if 'YOLO' in self.model_type:
labels = label2yolobox(labels, info_img, self.img_size, lrflip)
padded_labels[range(len(labels))[:self.max_labels]
] = labels[:self.max_labels]
padded_labels = torch.from_numpy(padded_labels)
return img, padded_labels, info_img, id_
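# Hedged usage sketch (paths, augmentation values and model name are made up):
# COCODataset plugs into a standard torch DataLoader; each batch carries images,
# padded labels, resize info and image ids as described in __getitem__ above.
def _example_coco_loader():
    augmentation = {'LRFLIP': True, 'JITTER': 0.3, 'RANDOM_PLACING': True,
                    'HUE': 0.1, 'SATURATION': 1.5, 'EXPOSURE': 1.5,
                    'RANDOM_DISTORT': False}
    dataset = COCODataset(model_type='YOLOv3', data_dir='COCO',
                          json_file='instances_train2017.json', name='train2017',
                          img_size=416, augmentation=augmentation)
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
    imgs, labels, info_img, ids = next(iter(loader))
    return imgs.shape, labels.shape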
| 38.171875
| 89
| 0.57061
|
0c6fb84f0be6586966972290c73b947b4431a812
| 1,314
|
py
|
Python
|
ws-tests/config_test_study_push.py
|
OpenTreeOfLife/phylesystem-api
|
569efaed343fd3e783d57818ccba2461bf990c5a
|
[
"BSD-2-Clause"
] | 7
|
2015-01-11T06:24:41.000Z
|
2019-10-18T00:24:21.000Z
|
ws-tests/config_test_study_push.py
|
OpenTreeOfLife/phylesystem-api
|
569efaed343fd3e783d57818ccba2461bf990c5a
|
[
"BSD-2-Clause"
] | 63
|
2015-01-19T13:23:59.000Z
|
2022-02-03T13:53:14.000Z
|
ws-tests/config_test_study_push.py
|
OpenTreeOfLife/phylesystem-api
|
569efaed343fd3e783d57818ccba2461bf990c5a
|
[
"BSD-2-Clause"
] | 2
|
2015-06-14T08:25:43.000Z
|
2020-12-27T09:24:53.000Z
|
#!/usr/bin/env python
import sys, os
from opentreetesting import test_http_json_method, writable_api_host_and_oauth_or_exit
#This test should only pass if filesize is set very low in the config and/or max tree number is set to 0
DOMAIN, auth_token = writable_api_host_and_oauth_or_exit(__file__)
study = '10'
SUBMIT_URI = DOMAIN + '/phylesystem/v1/study/' + study
data = {'output_nexml2json':'1.2'}
r = test_http_json_method(SUBMIT_URI,
'GET',
data=data,
expected_status=200,
return_bool_data=True)
d = r[1]['data']
c = d['nexml'].get('^ot:testCount', 0)
if isinstance(c, list):
c = c[0]
c = c + 1
d['nexml']['^ot:testCount'] = c
starting_commit_SHA = r[1]['sha']
data = { 'nexson' : d,
'auth_token': auth_token,
'starting_commit_SHA': starting_commit_SHA,
}
r2 = test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400,
return_bool_data=True)
PUSH_URI = DOMAIN + '/push/v1/' + study
r3 = test_http_json_method(PUSH_URI,
'PUT',
expected_status=400,
return_bool_data=True)
print(r3)
| 34.578947
| 104
| 0.567732
|
464dce3d29dc5c9c0f2044c8f3f2986e43d0f441
| 3,891
|
py
|
Python
|
ppmessage/api/handlers/pppageonlineportaluserhandler.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | 6
|
2017-11-03T17:31:52.000Z
|
2020-06-14T09:14:36.000Z
|
ppmessage/api/handlers/pppageonlineportaluserhandler.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | null | null | null |
ppmessage/api/handlers/pppageonlineportaluserhandler.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | 16
|
2017-08-08T01:25:47.000Z
|
2019-09-17T16:32:06.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import DeviceUser
from ppmessage.core.constant import API_LEVEL
import traceback
import logging
import json
import copy
class PPPageOnlinePortalUserHandler(BaseHandler):
def _detail(self, _user_list):
_devices = []
_users = []
for _user in _user_list:
_user = json.loads(_user)
_users.append(_user[0])
_devices.append(_user[1])
_redis = self.application.redis
_pi = _redis.pipeline()
for _device_uuid in _devices:
_key = DeviceInfo.__tablename__ + ".uuid." + _device_uuid
_pi.hgetall(_key)
_device_dicts = _pi.execute()
_device_dict = {}
for _device in _device_dicts:
_device_dict[_device.get("uuid")] = _device
_user_fields = ["uuid", "user_fullname", "user_email", "ppcom_browser_device_uuid", "ppcom_mobile_device_uuid"]
for _user_uuid in _users:
_key = DeviceUser.__tablename__ + ".uuid." + _user_uuid
_pi.hmget(_key, _user_fields)
_user_arrays = _pi.execute()
_user_dicts = []
for _user in _user_arrays:
_user_dicts.append(dict(zip(_user_fields, _user)))
_device_labels = ["ppcom_browser_device", "ppcom_mobile_device"]
for _user in _user_dicts:
for _label in _device_labels:
if _user.get(_label + "_uuid") != None:
_user[_label] = _device_dict.get(_user.get(_label + "_uuid"))
return _user_dicts
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPKEFU)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_KEFU)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPPageOnlinePortalUserHandler, self)._Task()
_request = json.loads(self.request.body)
_app_uuid = _request.get("app_uuid")
if _app_uuid == None:
logging.error("not enough parameters.")
self.setErrorCode(API_ERR.NO_PARA)
return
_page_offset = _request.get("page_offset")
if _page_offset == None or _page_offset < 0:
_page_offset = 0
_page_size = _request.get("page_size")
if _page_size == None or _page_size < 0:
_page_size = 30
_redis = self.application.redis
_key = DeviceInfo.__tablename__ + ".app_uuid." + _app_uuid + \
".is_ppcom_device.True.device_is_online.True"
_total_count = _redis.zcard(_key)
_r = self.getReturnData()
_r["total_count"] = _total_count
_r["return_count"] = 0
_r["page_size"] = _page_size
_r["page_offset"] = _page_offset
_r["list"] = []
if _total_count == 0:
logging.info("no online portal user of app: %s" % _app_uuid)
return
_offset = _page_offset * _page_size
if _offset >= _total_count:
logging.error("offset: %d > total: %d" % (_offset, _total_count))
return
_return_count = _page_size
if _offset + _page_size >= _total_count:
_return_count = _total_count - _offset
_user_list = _redis.zrevrange(_key, _offset, _offset+_return_count-1)
if _user_list == None or len(_user_list) == 0:
return
_user_list = self._detail(_user_list)
_r["list"] = _user_list
_r["return_count"] = _return_count
return
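# Hedged illustration (numbers are made up): with total_count=45, page_size=30 and
# page_offset=1, the code above computes _offset=30 and _return_count=15, so the
# second page returns the remaining 15 online portal users from the sorted set.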
| 31.893443
| 119
| 0.614495
|
77b4edf43d6f2fa41e54e5cf24c8034a7270a861
| 8,463
|
py
|
Python
|
src/gridthings/grid.py
|
kafonek/gridthings
|
adb84fb1beedc56a9ab59c7597f32234e9a1c4bc
|
[
"BSD-3-Clause"
] | 2
|
2021-12-10T22:43:23.000Z
|
2021-12-21T13:07:00.000Z
|
src/gridthings/grid.py
|
kafonek/gridthings
|
adb84fb1beedc56a9ab59c7597f32234e9a1c4bc
|
[
"BSD-3-Clause"
] | null | null | null |
src/gridthings/grid.py
|
kafonek/gridthings
|
adb84fb1beedc56a9ab59c7597f32234e9a1c4bc
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from .cell import Cell, OutOfBoundsCell
from .collection import Collection
# A Grid represents some tabular data, the kind of thing you might analyze in Pandas
# This library is about letting you accomplish tasks that may be confusing or
# difficult in frameworks like Pandas. For instance, getting the neighboring
# values of a specific point, or finding the cell with the minimum value in a diagonal row.
#
# See typed_grids.py for Grids with data validation
class Grid:
# These two _cls variables are here as entrypoints for customizing
# your Grid object, plugging in your own Cell or Collection mechanism
# They can also be inserted in init kwargs
cell_cls = Cell
collection_cls = Collection
def __init__(
self,
data: Union[Dict[int, Dict[int, Any]], List[Dict[int, Any]], str],
strip_whitespace: bool = True,
line_sep: str = "\n",
sep: Optional[str] = None,
out_of_bounds_value: Optional[Any] = None,
cell_cls: Type[Cell] = None,
collections_cls: Type[Collection] = None,
) -> None:
"""
Instantiate a Grid object from one of several data formats.
1. a dictionary of dictionaries, e.g. from df.to_dict()
2. a list of dictionaries, e.g. from df.to_dict(orient='records')
3. a string with line breaks, e.g. 'abc\ndef'
4. a string with line breaks and in-line separator, e.g. 'a,b,c\nd,e,f'
line_sep and sep only apply when data is a string.
line_sep is for the break between lines
sep is for any in-line separator
"""
self.data: Dict[int, Dict[int, Cell]] = {}
self.cell_cls = cell_cls or self.cell_cls
self.collection_cls = collections_cls or self.collection_cls
# data is stored internally as y:x:value, following pandas style
# can also think of it like row-data goes to the y position
# and column data goes to the x position
if isinstance(data, dict):
for x_pos, row_data in data.items():
for y_pos, value in row_data.items():
if y_pos not in self.data:
self.data[y_pos] = {}
self.data[y_pos][x_pos] = self.cell_cls(
y=y_pos, x=x_pos, value=value
)
elif isinstance(data, list):
for y_pos, column_data in enumerate(data):
if y_pos not in self.data:
self.data[y_pos] = {}
for x_pos, value in column_data.items():
self.data[y_pos][x_pos] = self.cell_cls(
y=y_pos, x=x_pos, value=value
)
elif isinstance(data, str):
if strip_whitespace:
data = data.strip()
for y_pos, line in enumerate(data.split(line_sep)):
if y_pos not in self.data:
self.data[y_pos] = {}
if strip_whitespace:
line = line.strip()
if sep:
# mypy doesn't like that line becomes a List[str]
# although it should recognize I just care about it being Iterable...
line = line.split(sep) # type: ignore
for x_pos, value in enumerate(line):
self.data[y_pos][x_pos] = self.cell_cls(
y=y_pos, x=x_pos, value=value
)
# Default value for OutOfBound cells when a Collection
# extends outside the grid, which can happen with .peek() and .line()
self.out_of_bounds_value = out_of_bounds_value
@property
def is_regular(self) -> bool:
"Return True if all rows are the same length"
return all(len(row) == len(self.data[0]) for row in self.data.values())
@property
def shape(self) -> Tuple[int, int]:
"Return the shape of the grid"
return len(self.data), len(self.data[0])
def __repr__(self):
if self.is_regular:
return f"<{self.__class__.__name__} shape={self.shape}>"
else:
return f"<{self.__class__.__name__} shape=(irregular)>"
def get(self, y: int, x: int) -> Cell:
"Return a Cell object at a given y, x position"
return self.data[y][x]
def get_row(self, y: int) -> Collection:
"Return the y'th row. coll = grid.get_row(0) gives the top row of the grid"
cells = list(self.data[y].values())
return self.collection_cls(cells=cells)
def get_column(self, x: int) -> Collection:
"Return the x'th column. coll = grid.get_column(0) gives the left column of the grid"
cells = [row[x] for row in self.data.values()]
return self.collection_cls(cells=cells)
# Useful for iterating through every cell in the grid. for cell in grid.flatten():
def flatten(self) -> Collection:
"Flatten the 2-d Grid into a 1-d Collection of cells"
cells = []
for row in self.data.values():
for cell in row.values():
cells.append(cell)
return Collection(cells=cells)
# Primarily useful for integration to pandas: df = pandas.DataFrame(grid.values())
def values(self) -> List[List[Any]]:
"Return the grid as a list of lists"
return [[cell.value for cell in row.values()] for row in self.data.values()]
def peek(self, y: int, x: int, y_offset: int, x_offset: int) -> Cell:
"Return a Cell object offset from a given y, x position"
y_out = y + y_offset
x_out = x + x_offset
if y_out not in self.data or x_out not in self.data[y_out]:
return OutOfBoundsCell(y=y_out, x=x_out, value=self.out_of_bounds_value)
return self.data[y_out][x_out]
def peek_left(self, y: int, x: int, distance: int = 1) -> Cell:
"Return a Cell object to the left of a given y, x position"
return self.peek(y=y, x=x, y_offset=0, x_offset=-distance)
def peek_right(self, y: int, x: int, distance: int = 1) -> Cell:
"Return a Cell object to the right of a given y, x position"
return self.peek(y=y, x=x, y_offset=0, x_offset=distance)
def peek_up(self, y: int, x: int, distance: int = 1) -> Cell:
"Return a Cell object above a given y, x position"
return self.peek(y=y, x=x, y_offset=-distance, x_offset=0)
def peek_down(self, y: int, x: int, distance: int = 1) -> Cell:
"Return a Cell object below a given y, x position"
return self.peek(y=y, x=x, y_offset=distance, x_offset=0)
def peek_linear(self, y: int, x: int, distance: int = 1) -> Collection:
"Return peek_left, peek_right, peek_up, and peek_down from a given y, x position"
cells = [
self.peek_left(y=y, x=x, distance=distance),
self.peek_right(y=y, x=x, distance=distance),
self.peek_up(y=y, x=x, distance=distance),
self.peek_down(y=y, x=x, distance=distance),
]
return self.collection_cls(cells=cells)
def peek_diagonal(self, y: int, x: int, distance: int = 1) -> Collection:
"Return peek diagonal up/left, up/right, down/left, down/right a given y, x position"
cells = [
self.peek(y=y, x=x, y_offset=-distance, x_offset=-distance),
self.peek(y=y, x=x, y_offset=-distance, x_offset=distance),
self.peek(y=y, x=x, y_offset=distance, x_offset=-distance),
self.peek(y=y, x=x, y_offset=distance, x_offset=distance),
]
return self.collection_cls(cells=cells)
def peek_all(self, y: int, x: int, distance: int = 1) -> Collection:
"Return all cells around a given y, x position"
linear_neighbors = self.peek_linear(y=y, x=x, distance=distance)
diag_neighbors = self.peek_diagonal(y=y, x=x, distance=distance)
return linear_neighbors + diag_neighbors
def line(
self, y: int, x: int, y_step: int = 0, x_step: int = 0, distance: int = 1
) -> Collection:
"Return a Collection of cells starting at a given y/x position and stepping for some distance"
cells = []
for offset in range(distance):
out_cell = self.peek(
y=y, x=x, y_offset=offset * y_step, x_offset=offset * x_step
)
cells.append(out_cell)
return self.collection_cls(cells=cells)
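# Hedged usage sketch (grid contents are made up): build a Grid from a multi-line
# string, then read a single cell and the Collection of its eight neighbours.
def _example_grid_usage():
    grid = Grid('467..114\n...*....\n..35..63')
    cell = grid.get(y=1, x=3)            # the '*' cell
    top_row = grid.get_row(0)            # Collection for the first row
    around = grid.peek_all(y=1, x=3)     # Collection of the 8 surrounding cells
    return cell.value, top_row, around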
| 43.623711
| 102
| 0.602978
|