Dataset schema (column: type, observed range; ⌀ = nullable):
hexsha: string, length 40
size: int64, 3 to 1.03M
ext: string, 10 classes
lang: string, 1 value (Python)
max_stars_repo_path: string, length 3 to 972
max_stars_repo_name: string, length 6 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 3 to 972
max_issues_repo_name: string, length 6 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 116k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 3 to 972
max_forks_repo_name: string, length 6 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 3 to 1.03M
avg_line_length: float64, 1.13 to 941k
max_line_length: int64, 2 to 941k
alphanum_fraction: float64, 0 to 1

Rows (one per source file):
hexsha: 046f7ca774853f5d3eb7867b83c71d1422a4ad73 | size: 36 | ext: py | lang: Python
max_stars: path=deliverable1/test_case_05/__init__.py | repo=TrackerSB/IEEEAITestChallenge2021 | head=836f0cfa39a3e25a8149a9adb77c3a8270417a39 | licenses=["MIT"] | count=1 | events: 2021-09-14T16:30:44.000Z to 2021-09-14T16:30:44.000Z
max_issues: path=deliverable1/test_case_05/__init__.py | repo=TrackerSB/IEEEAITestChallenge2021 | head=836f0cfa39a3e25a8149a9adb77c3a8270417a39 | licenses=["MIT"] | count=22 | events: 2021-02-26T06:35:00.000Z to 2021-07-16T12:37:58.000Z
max_forks: path=deliverable1/test_case_05/__init__.py | repo=TrackerSB/IEEEAITestChallenge2021 | head=836f0cfa39a3e25a8149a9adb77c3a8270417a39 | licenses=["MIT"] | count=2 | events: 2021-03-10T19:50:28.000Z to 2021-08-23T08:02:36.000Z
content:
from .test_case_05 import TestCase05
avg_line_length: 36 | max_line_length: 36 | alphanum_fraction: 0.888889
hexsha: 2ae6ba46f28ed48df44a8138db1d245f4df6587f | size: 580 | ext: py | lang: Python
max_stars: path=crypto/gain/deposit.py | repo=tmackenzie/Crypto-Trading-Gains | head=4d789bb80d7adde3edb0560b17da0bda2e863f85 | licenses=["MIT"] | count=1 | events: 2022-02-28T18:11:36.000Z to 2022-02-28T18:11:36.000Z
max_issues: path=crypto/gain/deposit.py | repo=tmackenzie/crypto-trading-gains | head=4d789bb80d7adde3edb0560b17da0bda2e863f85 | licenses=["MIT"] | count=null | events: null
max_forks: path=crypto/gain/deposit.py | repo=tmackenzie/crypto-trading-gains | head=4d789bb80d7adde3edb0560b17da0bda2e863f85 | licenses=["MIT"] | count=null | events: null
content:
#!/usr/bin/env python
from decimal import Decimal
from functools import reduce

from . import util


def usd_deposits(trxs, start_date, end_date):
    start_epoch = start_date.timestamp()
    end_epoch = end_date.timestamp()
    total = 0
    deposits = []
    for trx in trxs:
        if trx["trx_type"] == "receive" and util.date_is_between(start_epoch, end_epoch, trx["epoch_seconds"]) and trx["receive"] == "USD":
            deposits.append(trx)
            total += trx["qty"]
    summary = {"total": total}
    return {"summary": summary} | {"transactions": deposits}
avg_line_length: 29 | max_line_length: 139 | alphanum_fraction: 0.648276
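A minimal usage sketch for usd_deposits above. The transaction keys are taken from the function body, while the date_is_between helper stands in for the repo's util module (its exact semantics are an assumption); note the dict-merge operator | in the return statement requires Python 3.9+.

from datetime import datetime, timezone

# Stand-in for util.date_is_between; assumed to be an inclusive epoch-range check.
def date_is_between(start_epoch, end_epoch, epoch_seconds):
    return start_epoch <= epoch_seconds <= end_epoch

trxs = [
    {"trx_type": "receive", "receive": "USD", "qty": 100,
     "epoch_seconds": datetime(2022, 1, 15, tzinfo=timezone.utc).timestamp()},
    {"trx_type": "send", "receive": "USD", "qty": 50,
     "epoch_seconds": datetime(2022, 1, 20, tzinfo=timezone.utc).timestamp()},
]
start = datetime(2022, 1, 1, tzinfo=timezone.utc)
end = datetime(2022, 2, 1, tzinfo=timezone.utc)

# Same filtering and merge logic as usd_deposits, inlined for the example.
deposits = [t for t in trxs
            if t["trx_type"] == "receive"
            and date_is_between(start.timestamp(), end.timestamp(), t["epoch_seconds"])
            and t["receive"] == "USD"]
result = {"summary": {"total": sum(t["qty"] for t in deposits)}} | {"transactions": deposits}
print(result["summary"])  # {'total': 100}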
hexsha: 9655b5bd896bc75634579d27b873e08c352cfcf0 | size: 813 | ext: py | lang: Python
max_stars: path=app/auth/authorize_api_request.py | repo=jiansoung/flask-todos-api | head=7c8aed54b042790a8b3ab11de856566005849fd4 | licenses=["MIT"] | count=null | events: null
max_issues: path=app/auth/authorize_api_request.py | repo=jiansoung/flask-todos-api | head=7c8aed54b042790a8b3ab11de856566005849fd4 | licenses=["MIT"] | count=null | events: null
max_forks: path=app/auth/authorize_api_request.py | repo=jiansoung/flask-todos-api | head=7c8aed54b042790a8b3ab11de856566005849fd4 | licenses=["MIT"] | count=null | events: null
content:
# -*- coding: utf-8 -*-
from app.lib import Message
from app.lib import JsonWebToken
from app.models import User
from app.exceptions import exceptions


class AuthorizeApiRequest:
    def __init__(self, headers):
        self.__headers = headers

    @property
    def user(self):
        decoded_auth_token = self.__decode_auth_token()
        user_id = decoded_auth_token['user_id']
        user = User.query.get(user_id)
        if user is None:
            raise exceptions.InvalidToken(Message.invalid_token)
        return user

    def __decode_auth_token(self):
        if 'Authorization' not in self.__headers:
            raise exceptions.MissingToken(Message.missing_token)
        http_auth_header = self.__headers['Authorization'].split(' ')[-1]
        return JsonWebToken.decode(http_auth_header)
avg_line_length: 30.111111 | max_line_length: 73 | alphanum_fraction: 0.690037
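The __decode_auth_token method above extracts the token by taking the last space-separated chunk of the Authorization header, so both "Bearer <token>" and a bare token work. A tiny standalone illustration (the header value is made up):

headers = {"Authorization": "Bearer abc.def.ghi"}
token = headers["Authorization"].split(" ")[-1]
print(token)  # abc.def.ghi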
hexsha: 1572685f61f49268f1ea72c9285105125589aaf2 | size: 681 | ext: py | lang: Python
max_stars: path=euler/problem_1.py | repo=cooperdramsey/project-euler-solutions | head=c67a771f4544735504f7050af9a68b4bece93992 | licenses=["MIT"] | count=null | events: null
max_issues: path=euler/problem_1.py | repo=cooperdramsey/project-euler-solutions | head=c67a771f4544735504f7050af9a68b4bece93992 | licenses=["MIT"] | count=null | events: null
max_forks: path=euler/problem_1.py | repo=cooperdramsey/project-euler-solutions | head=c67a771f4544735504f7050af9a68b4bece93992 | licenses=["MIT"] | count=null | events: null
content:
# Problem 1
# Multiples of 3 and 5
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
# The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
import numpy as np


def sum_of_multiples(value):
    value_list = np.arange(1, value)
    value_list_3 = np.mod(value_list, 3)
    value_list_5 = np.mod(value_list, 5)
    result_3 = np.add(np.where(value_list_3 == 0), 1)
    result_5 = np.add(np.where(value_list_5 == 0), 1)
    result = np.concatenate((result_3, result_5), axis=1)
    return np.sum(np.unique(result))


if __name__ == '__main__':
    value = 1000
    print(sum_of_multiples(value))
avg_line_length: 29.608696 | max_line_length: 97 | alphanum_fraction: 0.684288
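For comparison, the same Project Euler result can be computed without NumPy using inclusion-exclusion: sum the multiples of 3, plus the multiples of 5, minus the multiples of 15, each via the arithmetic-series formula. A short sketch:

def sum_of_multiples_below(limit, k):
    # k + 2k + ... + nk with n = (limit - 1) // k
    n = (limit - 1) // k
    return k * n * (n + 1) // 2

print(sum_of_multiples_below(1000, 3)
      + sum_of_multiples_below(1000, 5)
      - sum_of_multiples_below(1000, 15))  # 233168, matches sum_of_multiples(1000)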
hexsha: 3e774a82e7b00e88caaeeba1ab5c1c1364553bda | size: 260 | ext: py | lang: Python
max_stars: path=app/models/__init__.py | repo=h1-the-swan/science_history_institute_chp_app | head=0e99dec17403dfcaa2e7fbcd0374c39a773445b1 | licenses=["MIT"] | count=null | events: null
max_issues: path=app/models/__init__.py | repo=h1-the-swan/science_history_institute_chp_app | head=0e99dec17403dfcaa2e7fbcd0374c39a773445b1 | licenses=["MIT"] | count=2 | events: 2021-03-31T19:15:56.000Z to 2021-12-13T20:10:25.000Z
max_forks: path=app/models/__init__.py | repo=h1-the-swan/science_history_institute_chp_app | head=0e99dec17403dfcaa2e7fbcd0374c39a773445b1 | licenses=["MIT"] | count=null | events: null
content:
"""
These imports enable us to make all defined models members of the models
module (as opposed to just their python files)
"""
from .user import * # noqa
from .miscellaneous import * # noqa
from .oral_history import * # noqa
from .entity import * # noqa
avg_line_length: 26 | max_line_length: 72 | alphanum_fraction: 0.723077
hexsha: 35be1941a77bc3ec1e4cbe9a137c2fe22a78d786 | size: 312 | ext: py | lang: Python
max_stars: path=dev/circuitpython/examples/apds9960_simpletest.py | repo=scripsi/picodeebee | head=0ec77e92f09fa8711705623482e57a5e0b702696 | licenses=["MIT"] | count=null | events: null
max_issues: path=dev/circuitpython/examples/apds9960_simpletest.py | repo=scripsi/picodeebee | head=0ec77e92f09fa8711705623482e57a5e0b702696 | licenses=["MIT"] | count=null | events: null
max_forks: path=dev/circuitpython/examples/apds9960_simpletest.py | repo=scripsi/picodeebee | head=0ec77e92f09fa8711705623482e57a5e0b702696 | licenses=["MIT"] | count=null | events: null
content:
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_apds9960.apds9960 import APDS9960

i2c = board.I2C()
apds = APDS9960(i2c)
apds.enable_proximity = True

while True:
    print(apds.proximity)
    time.sleep(0.2)
avg_line_length: 19.5 | max_line_length: 63 | alphanum_fraction: 0.733974
hexsha: d1c54034827fa8d6fa890e01d40c81f13ee8b06b | size: 20,226 | ext: py | lang: Python
max_stars: path=src/preproc/slicetime_params.py | repo=erramuzpe/ruber | head=cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | licenses=["MIT"] | count=2 | events: 2018-11-07T07:54:34.000Z to 2022-01-13T13:06:06.000Z
max_issues: path=src/preproc/slicetime_params.py | repo=erramuzpe/ruber | head=cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | licenses=["MIT"] | count=null | events: null
max_forks: path=src/preproc/slicetime_params.py | repo=erramuzpe/ruber | head=cf510a4cf9b0b15d870b6506a1593c3b2b00a3b7 | licenses=["MIT"] | count=null | events: null
content:
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Helper functions and nipype interface for
reading slice timing correction parameters specially from Siemens acquisitions
"""
import os.path as op
import numpy as np
import nibabel as nib
from nipype.interfaces.base import (BaseInterface,
TraitedSpec,
InputMultiPath,
BaseInterfaceInputSpec,
traits,)
from .._utils import check_equal, grep
from ..utils import get_trait_value
from ..preproc.dicom import split_dcm_ahdr, dcm_ascii_hdr
def slicing_mode(dcm_file):
""" Return the slicing mode of the fMRI acquisition file given
one of its DICOM files. Avoid giving the first DICOM file.
Parameters
----------
dcm_file: str
Path to the DICOM file
Returns
-------
mode: str
Choices: ('ascending', 'descending', 'interleaved')
References
----------
https://wiki.cimec.unitn.it/tiki-index.php?page=MRIBOLDfMRI
http://www.mccauslandcenter.sc.edu/CRNL/tools/stc
https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind03&L=SPM&D=0&P=5914721
The timing can be found out most easily by looking into some shadow
information: In any image from the syngo systems the measurement-protocol is
present (in ascii). You may find it by looking for "### ASCCONV BEGIN ###"
and "### ASCCONV END ###". Here you may also find the slice positions of the
single images.
> Now find "sSliceArray.ucMode". This is
> 0x1 for ascending
> 0x2 for descending
> 0x4 for interleaved
> In the interleaved mode, the information given by Peter Erhard is correct;
for the rest it's clear anyway.
Example
-------
import os.path as op
from glob import glob
dcmdir = '/home/alexandre/data/pet_stadhauders/hiswork/FTD/'
restdirs = [glob(op.join(op.abspath(sd), '*ep2d*')) for sd in glob(op.join(dcmdir, '*'))]
dcms = [glob(op.join(rd[0], '*.dcm'))[10] for rd in restdirs if rd]
modes = [slicing_mode(dcm) for dcm in dcms]
print(modes)
"""
_, ascconv = split_dcm_ahdr(dcm_ascii_hdr(dcm_file))
mode_code = grep(ascconv, 'sSliceArray.ucMode')[0].split('=')[1].strip()
code_modes = {'0x1': 'ascending',
'0x2': 'descending',
'0x4': 'interleaved'}
return code_modes[mode_code]
def _get_n_slices(in_file):
img = nib.load(in_file)
n_slices = 0
try:
n_slices = img.header.get_n_slices()
except:
n_slices = img.shape[2]
finally:
return n_slices
def _get_time_repetition(in_file):
tr = nib.load(in_file).header.get_dim_info()[2]
if tr is None:
        raise ValueError('Could not pick the TR value from {}. Please set time_repetition in the configuration.'.format(in_file))
if tr < 1.0 or tr > 5.0:
raise ValueError('Aborting: strange Repeat Time (TR), got {}.'.format(tr))
if tr > 5.0:
raise ValueError('Long TR often used with sparse imaging: if this is a sparse design please set the TR manually.')
if tr < 0.5:
raise ValueError('Short TR may be due to DICOM-to-NIfTI conversion. Perhaps use dcm2nii.')
return tr
def _get_time_acquisition(in_file, TR, n_slices):
if n_slices <= 0: # not only to avoid division by zero
raise ValueError('Null number of slices when calculating time '
'acquisition for {}, got {}. Please set num_slices '
'in the configuration.'.format(in_file, n_slices))
return (TR/n_slices) * (n_slices-1)
def _get_ref_slice(in_file, slice_order):
if slice_order is None:
raise ValueError('Expected a list of integers as `slice_order`, got {}.'.format(slice_order))
return slice_order[0]
def _get_slice_order(in_file, n_slices, slice_mode):
def read_slice_mode_byte(in_file):
try:
with open(in_file) as f:
f.seek(122)
slice_mode = f.read(1)
except:
return -1
else:
return slice_mode
def get_nii_slice_times(img):
# try get the slice times
try:
times = img.header.get_slice_times()
except Exception:
pass
else:
return times
def order_from_times(times):
return np.argsort(times) + 1
def calculate_slice_order(n_slices, slice_mode):
"""
Parameters
----------
n_slices: int
slice_mode: int or str
# 0: 'unknown' : ask for automatic detection of the slice order
# 1: 'seq_inc' : sequential ascending kNIFTI_SLICE_SEQ_INC = 1; %1,2,3,4
# 2: 'seq_dec' : sequential descending kNIFTI_SLICE_SEQ_DEC = 2; %4,3,2,1
# 3: 'alt_inc' : Siemens: interleaved ascending with odd number of slices,
interleaved for other vendors kNIFTI_SLICE_ALT_INC = 3; %1,3,2,4
# 4: 'alt_dec' : descending interleaved kNIFTI_SLICE_ALT_DEC = 4; %4,2,3,1
# 5: 'alt_inc2': Siemens interleaved ascending with even number of slices kNIFTI_SLICE_ALT_INC2 = 5; %2,4,1,3
# 6: 'alt_dec2': Siemens interleaved descending with even number of slices kNIFTI_SLICE_ALT_DEC2 = 6; %3,1,4,2
Returns
-------
slice_order: list of int
"""
mode_int = { 0: 'unknown',
1: 'seq_inc',
2: 'seq_dec',
3: 'alt_inc',
4: 'alt_dec',
5: 'alt_inc2',
6: 'alt_dec2',}
if isinstance(slice_mode, int):
slice_mode = mode_int[slice_mode]
choices = tuple(mode_int.values())
if slice_mode not in choices:
raise ValueError('Expected `slice_mode` to be in {}, got {}.'.format(choices,
slice_mode))
is_siemens = False
if slice_mode in ('alt_inc2', 'alt_dec2'):
is_siemens = True
if 'seq' in slice_mode: # sequential
slice_order = list(range(n_slices))
else: # interleaved
if is_siemens and '2' in slice_mode: #siemens and even number of slices
slice_order = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2))
else:
slice_order = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2))
if 'dec' in slice_mode: # descending
slice_order = [n_slices - 1 - i for i in slice_order]
return slice_order
# starts the function
if slice_mode == 'unknown':
# check if the slice times are in the NifTI header
img = nib.load(in_file)
times = get_nii_slice_times(img)
if times is not None:
return order_from_times(times)
# read the slice mode code from the file
if slice_mode == 'unknown':
slice_mode = read_slice_mode_byte(in_file)
if slice_mode <= 0:
slice_mode = 'unknown'
if slice_mode == 'unknown':
raise AttributeError("Don't have enough information to calculate the "
"slice order from {}. Please set the slice_mode argument.".format(in_file))
return calculate_slice_order(n_slices, slice_mode)
class STCParameters(object):
""" Class to calculate the parameters needed for slice timing correction.
Some options are automated for Siemens acquisitions.
Auto detection of slice order, i.e., slice_order == [0] and slice_mode == 'unknown'
only works for images from Siemens and converted with dcm2nii from Nov 2013 or later.
Siemens have unusual interleaving
- http://cbs.fas.harvard.edu/node/559#slice_order
- https://wiki.cimec.unitn.it/tiki-index.php?page=MRIBOLDfMRI
This class is based on the script by Chris Rorden's Neuropsychology Lab in:
- http://www.mccauslandcenter.sc.edu/CRNL/tools/stc
This is a callable class so you can instance this class in a nipype Function object.
See `slice_timing_params` in this module after the declaration of this class.
"""
def __init__(self):
self.in_files = None
self.num_slices = 0
self.slice_order = None
self.time_repetition = None
self.time_acquisition = None
self.ref_slice = None
self.slice_mode = 'unknown'
def fit(self, in_files,
num_slices = 0,
ref_slice = None,
slice_order = None,
time_acquisition = None,
time_repetition = None,
slice_mode = 'unknown'):
"""
Parameters
----------
in_files: str
Path to the input files
num_slices: int
Number of slices of `in_files`.
ref_slice: int
Index of the reference slice
slice_order: list of ints
List of integers with the order in which slices are acquired
time_acquisition: int
Time of volume acquisition. usually calculated as TR-(TR/num_slices)
time_repetition: int
The time repetition (TR) of the input dataset in seconds
Default: 0
If left to default will read the TR from the nifti image header.
slice_mode: str
Choices:
'unknown': auto detect if images are from Siemens and converted with dcm2nii from Nov 2013 or later #kNIFTI_SLICE_UNKNOWN
'seq_inc': sequential ascending kNIFTI_SLICE_SEQ_INC = 1; %1,2,3,4
'seq_dec': sequential descending kNIFTI_SLICE_SEQ_DEC = 2; %4,3,2,1
'alt_inc': Siemens: interleaved ascending with odd number of slices,
interleaved for other vendors kNIFTI_SLICE_ALT_INC = 3; %1,3,2,4
'alt_dec': descending interleaved kNIFTI_SLICE_ALT_DEC = 4; %4,2,3,1
'alt_inc2': Siemens interleaved ascending with even number of slices kNIFTI_SLICE_ALT_INC2 = 5; %2,4,1,3
'alt_dec2': Siemens interleaved descending with even number of slices kNIFTI_SLICE_ALT_DEC2 = 6; %3,1,4,2
Default: 'unknown'
            If left to default it will try to detect the slice order from the NIfTI image header; if that
            does not work an AttributeError exception will be raised.
Returns
-------
num_slices
ref_slice
slice_order
time_acquisition
time_repetition
"""
self.in_files = in_files
self.num_slices = num_slices
self.slice_order = slice_order
self.time_repetition = time_repetition
self.time_acquisition = time_acquisition
self.ref_slice = ref_slice
self.slice_mode = slice_mode
_ = self.set_num_slices()
_ = self.set_slice_order()
_ = self.set_time_repetition()
_ = self.set_time_acquisition()
_ = self.set_ref_slice()
return (self.num_slices,
self.ref_slice,
self.slice_order,
self.time_acquisition,
self.time_repetition)
def _check_in_files(self):
if isinstance(self.in_files, str):
in_files = [self.in_files]
else:
in_files = self.in_files
for f in in_files:
if not op.exists(f):
raise IOError('Expected an existing file in `in_files`')
return in_files
def _check_all_equal(self, func, error_msg=None, **kwargs):
in_files = self._check_in_files()
values = [func(f, **kwargs) for f in in_files]
if not check_equal(values):
if error_msg is None:
error_msg = 'The values from {} are not the same, got {{}}.'.format(func.__name__)
raise ValueError(error_msg.format(values))
return values[0]
def set_time_acquisition(self):
if self.time_acquisition is not None:
return self.time_acquisition
n_slices = self.set_num_slices()
tr = self.set_time_repetition()
error_msg = 'The time acquisition calculated from all the `in_files` are not the same, got {}.'
self.time_acquisition = self._check_all_equal(_get_time_acquisition,
error_msg,
TR=tr,
n_slices=n_slices)
return self.time_acquisition
def set_num_slices(self):
if self.num_slices is not None:
if self.num_slices > 0:
return self.num_slices
error_msg = 'The number of z slices for all `in_files` are not the same, got {}.'
self.num_slices = self._check_all_equal(_get_n_slices, error_msg)
return self.num_slices
def set_time_repetition(self):
if self.time_repetition is not None:
return self.time_repetition
error_msg = 'The TR calculated from all the `in_files` are not the same, got {}.'
self.time_repetition = self._check_all_equal(_get_time_repetition, error_msg)
return self.time_repetition
def set_slice_order(self):
if self.slice_order is not None:
return self.slice_order
n_slices = self.set_num_slices()
error_msg = 'The slice order for all `in_files` are not the same, got {}.'
self.slice_order = self._check_all_equal(_get_slice_order,
error_msg,
n_slices=n_slices,
slice_mode=self.slice_mode)
return self.slice_order
    def set_ref_slice(self):
        """Set the reference slice to the first acquired slice (http://www.alivelearn.net/?p=1037).
        slice_order[0] is the first acquired slice, so set it as the reference.
        """
        if self.ref_slice is not None:
            return self.ref_slice
slice_order = self.set_slice_order()
error_msg = 'The reference slice for all `in_files` are not the same, got {}.'
self.ref_slice = self._check_all_equal(_get_ref_slice, error_msg,
slice_order=slice_order)
return self.ref_slice
class STCParametersInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiPath(traits.File(desc="fMRI image file(s) from where to obtain the slice time "
"correction parameters.", exists=True, mandatory=True))
num_slices = traits.Int (desc="Number of slices (depends on acquisition direction).")
slice_order = traits.ListInt(desc="List of integers with the order in which slices are acquired.")
time_repetition = traits.Float (desc="The time repetition (TR) of the input dataset in seconds. "
"If left to default will read the TR from the nifti image header.", default=0)
time_acquisition = traits.Float (desc="Time of volume acquisition. usually calculated as TR-(TR/num_slices).")
ref_slice = traits.Int (desc="Index of the slice of reference.")
slice_mode = traits.Enum ('unknown', 'seq_inc', 'seq_dec', 'alt_inc', 'alt_dec', 'alt_inc2', 'alt_dec2',
desc="Slicing mode of the acquisition. \n"
"Choices: \n"
" 'unknown': auto detect if images are from Siemens and converted with dcm2nii"
" from Nov 2013 or later #kNIFTI_SLICE_UNKNOWN\n"
" 'seq_inc': sequential ascending kNIFTI_SLICE_SEQ_INC = 1; %1,2,3,4\n"
" 'seq_dec': sequential descending kNIFTI_SLICE_SEQ_DEC = 2; %4,3,2,1\n"
" 'alt_inc': Siemens: interleaved ascending with odd number of slices,"
" interleaved for other vendors kNIFTI_SLICE_ALT_INC = 3; %1,3,2,4\n"
" 'alt_dec': descending interleaved kNIFTI_SLICE_ALT_DEC = 4; %4,2,3,1\n"
" 'alt_inc2': Siemens interleaved ascending with even number of slices kNIFTI_SLICE_ALT_INC2 = 5; %2,4,1,3\n"
" 'alt_dec2': Siemens interleaved descending with even number of slices kNIFTI_SLICE_ALT_DEC2 = 6; %3,1,4,2\n"
"If left to default will try to detect the TR from the nifti image header, if it doesn't work"
"an AttributeError exception will be raise.", default='unknown')
class STCParametersOutputSpec(TraitedSpec):
in_files = InputMultiPath(traits.File(desc="fMRI image file from where to obtain the slice time correction "
"parameters.", exists=True, mandatory=True))
num_slices = traits.Int (desc="Number of slices (depends on acquisition direction).")
slice_order = traits.ListInt(desc="List of integers with the order in which slices are acquired.")
time_repetition = traits.Float (desc="The time repetition (TR) of the input dataset in seconds. "
"If left to default will read the TR from the nifti image header.", default=0)
time_acquisition = traits.Float (desc="Time of volume acquisition. usually calculated as TR-(TR/num_slices).")
ref_slice = traits.Int (desc="Index of the slice of reference.")
class STCParametersInterface(BaseInterface):
"""
Class to calculate the parameters needed for slice timing correction.
Some options are automated for Siemens acquisitions.
Check the STCParameters class docstring for more info.
"""
input_spec = STCParametersInputSpec
output_spec = STCParametersOutputSpec
def _run_interface(self, runtime):
num_slices = get_trait_value(self.inputs, 'num_slices', default=0)
ref_slice = get_trait_value(self.inputs, 'ref_slice', default=None)
slice_order = get_trait_value(self.inputs, 'slice_order', default=None)
time_acquisition = get_trait_value(self.inputs, 'time_acquisition', default=None)
time_repetition = get_trait_value(self.inputs, 'time_repetition', default=None)
slice_mode = get_trait_value(self.inputs, 'slice_mode', default='unknown')
self.stc_params = STCParameters()
(self._num_slices,
self._ref_slice,
self._slice_order,
self._time_acquisition,
self._time_repetition) = self.stc_params.fit(in_files = self.inputs.in_files,
num_slices = num_slices,
ref_slice = ref_slice,
slice_order = slice_order,
time_acquisition = time_acquisition,
time_repetition = time_repetition,
slice_mode = slice_mode,
)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['in_files' ] = self.inputs.in_files
outputs['num_slices' ] = self._num_slices
outputs['ref_slice' ] = self._ref_slice
outputs['slice_order' ] = self._slice_order
outputs['time_acquisition'] = self._time_acquisition
outputs['time_repetition' ] = self._time_repetition
return outputs
avg_line_length: 41.617284 | max_line_length: 151 | alphanum_fraction: 0.582963
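The slice-order patterns documented inside calculate_slice_order above can be illustrated with a small standalone sketch. This mirrors the described behaviour (sequential vs. interleaved, ascending vs. descending, 0-based indices) but is not the module's actual code path:

def demo_slice_order(n_slices, mode):
    # Mirrors the patterns described in the docstring above (0-based indices).
    if mode.startswith('seq'):
        order = list(range(n_slices))
    elif mode.endswith('2'):  # Siemens interleaved with an even number of slices: odd indices first
        order = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2))
    else:  # interleaved: even indices first
        order = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2))
    if 'dec' in mode:  # descending variants reverse the spatial direction
        order = [n_slices - 1 - i for i in order]
    return order

for m in ('seq_inc', 'seq_dec', 'alt_inc', 'alt_dec', 'alt_inc2', 'alt_dec2'):
    print(m, demo_slice_order(6, m))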
hexsha: 438292857390bbfd3ffca0527ea431eff8652328 | size: 19,550 | ext: py | lang: Python
max_stars: path=nnunet/postprocessing/connected_components.py | repo=gasperpodobnik/nnUNet | head=f11906b13344db9f54e303378748a0defdea8331 | licenses=["Apache-2.0"] | count=null | events: null
max_issues: path=nnunet/postprocessing/connected_components.py | repo=gasperpodobnik/nnUNet | head=f11906b13344db9f54e303378748a0defdea8331 | licenses=["Apache-2.0"] | count=null | events: null
max_forks: path=nnunet/postprocessing/connected_components.py | repo=gasperpodobnik/nnUNet | head=f11906b13344db9f54e303378748a0defdea8331 | licenses=["Apache-2.0"] | count=null | events: null
content:
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from scipy.ndimage import label
import SimpleITK as sitk
from nnunet.utilities.sitk_stuff import copy_geometry
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
def load_remove_save(input_file: str, output_file: str, for_which_classes: list,
minimum_valid_object_size: dict = None):
# Only objects larger than minimum_valid_object_size will be removed. Keys in minimum_valid_object_size must
# match entries in for_which_classes
img_in = sitk.ReadImage(input_file)
img_npy = sitk.GetArrayFromImage(img_in)
volume_per_voxel = float(np.prod(img_in.GetSpacing(), dtype=np.float64))
image, largest_removed, kept_size = remove_all_but_the_largest_connected_component(img_npy, for_which_classes,
volume_per_voxel,
minimum_valid_object_size)
# print(input_file, "kept:", kept_size)
img_out_itk = sitk.GetImageFromArray(image)
img_out_itk = copy_geometry(img_out_itk, img_in)
sitk.WriteImage(img_out_itk, output_file)
return largest_removed, kept_size
def remove_all_but_the_largest_connected_component(image: np.ndarray, for_which_classes: list, volume_per_voxel: float,
minimum_valid_object_size: dict = None):
"""
removes all but the largest connected component, individually for each class
:param image:
:param for_which_classes: can be None. Should be list of int. Can also be something like [(1, 2), 2, 4].
Here (1, 2) will be treated as a joint region, not individual classes (example LiTS here we can use (1, 2)
to use all foreground classes together)
:param minimum_valid_object_size: Only objects larger than minimum_valid_object_size will be removed. Keys in
minimum_valid_object_size must match entries in for_which_classes
:return:
"""
if for_which_classes is None:
for_which_classes = np.unique(image)
for_which_classes = for_which_classes[for_which_classes > 0]
assert 0 not in for_which_classes, "cannot remove background"
largest_removed = {}
kept_size = {}
for c in for_which_classes:
if isinstance(c, (list, tuple)):
c = tuple(c) # otherwise it cant be used as key in the dict
mask = np.zeros_like(image, dtype=bool)
for cl in c:
mask[image == cl] = True
else:
mask = image == c
# get labelmap and number of objects
lmap, num_objects = label(mask.astype(int))
# collect object sizes
object_sizes = {}
for object_id in range(1, num_objects + 1):
object_sizes[object_id] = (lmap == object_id).sum() * volume_per_voxel
largest_removed[c] = None
kept_size[c] = None
if num_objects > 0:
# we always keep the largest object. We could also consider removing the largest object if it is smaller
# than minimum_valid_object_size in the future but we don't do that now.
maximum_size = max(object_sizes.values())
kept_size[c] = maximum_size
for object_id in range(1, num_objects + 1):
# we only remove objects that are not the largest
if object_sizes[object_id] != maximum_size:
# we only remove objects that are smaller than minimum_valid_object_size
remove = True
if minimum_valid_object_size is not None:
remove = object_sizes[object_id] < minimum_valid_object_size[c]
if remove:
image[(lmap == object_id) & mask] = 0
if largest_removed[c] is None:
largest_removed[c] = object_sizes[object_id]
else:
largest_removed[c] = max(largest_removed[c], object_sizes[object_id])
return image, largest_removed, kept_size
def load_postprocessing(json_file):
'''
loads the relevant part of the pkl file that is needed for applying postprocessing
:param pkl_file:
:return:
'''
a = load_json(json_file)
if 'min_valid_object_sizes' in a.keys():
min_valid_object_sizes = ast.literal_eval(a['min_valid_object_sizes'])
else:
min_valid_object_sizes = None
return a['for_which_classes'], min_valid_object_sizes
def determine_postprocessing(base, gt_labels_folder, raw_subfolder_name="validation_raw",
temp_folder="temp",
final_subf_name="validation_final", processes=default_num_threads,
dice_threshold=0, debug=False,
advanced_postprocessing=False,
pp_filename="postprocessing.json"):
"""
:param base:
:param gt_labels_folder: subfolder of base with niftis of ground truth labels
:param raw_subfolder_name: subfolder of base with niftis of predicted (non-postprocessed) segmentations
    :param temp_folder: used to store temporary data, will be deleted after we are done here unless debug=True
:param final_subf_name: final results will be stored here (subfolder of base)
:param processes:
:param dice_threshold: only apply postprocessing if results is better than old_result+dice_threshold (can be used as eps)
:param debug: if True then the temporary files will not be deleted
:return:
"""
# lets see what classes are in the dataset
classes = [int(i) for i in load_json(join(base, raw_subfolder_name, "summary.json"))['results']['mean'].keys() if
int(i) != 0]
folder_all_classes_as_fg = join(base, temp_folder + "_allClasses")
folder_per_class = join(base, temp_folder + "_perClass")
if isdir(folder_all_classes_as_fg):
shutil.rmtree(folder_all_classes_as_fg)
if isdir(folder_per_class):
shutil.rmtree(folder_per_class)
# multiprocessing rules
p = Pool(processes)
assert isfile(join(base, raw_subfolder_name, "summary.json")), "join(base, raw_subfolder_name) does not " \
"contain a summary.json"
# these are all the files we will be dealing with
fnames = subfiles(join(base, raw_subfolder_name), suffix=".nii.gz", join=False)
# make output and temp dir
maybe_mkdir_p(folder_all_classes_as_fg)
maybe_mkdir_p(folder_per_class)
maybe_mkdir_p(join(base, final_subf_name))
pp_results = {}
pp_results['dc_per_class_raw'] = {}
pp_results['dc_per_class_pp_all'] = {} # dice scores after treating all foreground classes as one
pp_results['dc_per_class_pp_per_class'] = {} # dice scores after removing everything except larges cc
# independently for each class after we already did dc_per_class_pp_all
pp_results['for_which_classes'] = []
pp_results['min_valid_object_sizes'] = {}
validation_result_raw = load_json(join(base, raw_subfolder_name, "summary.json"))['results']
pp_results['num_samples'] = len(validation_result_raw['all'])
validation_result_raw = validation_result_raw['mean']
if advanced_postprocessing:
# first treat all foreground classes as one and remove all but the largest foreground connected component
results = []
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(folder_all_classes_as_fg, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,)),)))
results = [i.get() for i in results]
# aggregate max_size_removed and min_size_kept
max_size_removed = {}
min_size_kept = {}
for tmp in results:
mx_rem, min_kept = tmp[0]
for k in mx_rem:
if mx_rem[k] is not None:
if max_size_removed.get(k) is None:
max_size_removed[k] = mx_rem[k]
else:
max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
for k in min_kept:
if min_kept[k] is not None:
if min_size_kept.get(k) is None:
min_size_kept[k] = min_kept[k]
else:
min_size_kept[k] = min(min_size_kept[k], min_kept[k])
print("foreground vs background, smallest valid object size was", min_size_kept[tuple(classes)])
print("removing only objects smaller than that...")
else:
min_size_kept = None
# we need to rerun the step from above, now with the size constraint
pred_gt_tuples = []
results = []
# first treat all foreground classes as one and remove all but the largest foreground connected component
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(folder_all_classes_as_fg, f)
results.append(
p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,), min_size_kept),)))
pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(folder_all_classes_as_fg, "summary.json"),
json_author="Fabian", num_threads=processes)
# now we need to figure out if doing this improved the dice scores. We will implement that defensively in so far
# that if a single class got worse as a result we won't do this. We can change this in the future but right now I
# prefer to do it this way
validation_result_PP_test = load_json(join(folder_all_classes_as_fg, "summary.json"))['results']['mean']
for c in classes:
dc_raw = validation_result_raw[str(c)]['Dice']
dc_pp = validation_result_PP_test[str(c)]['Dice']
pp_results['dc_per_class_raw'][str(c)] = dc_raw
pp_results['dc_per_class_pp_all'][str(c)] = dc_pp
# true if new is better
do_fg_cc = False
comp = [pp_results['dc_per_class_pp_all'][str(cl)] > (pp_results['dc_per_class_raw'][str(cl)] + dice_threshold) for
cl in classes]
before = np.mean([pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
after = np.mean([pp_results['dc_per_class_pp_all'][str(cl)] for cl in classes])
print("Foreground vs background")
print("before:", before)
print("after: ", after)
if any(comp):
# at least one class improved - yay!
# now check if another got worse
# true if new is worse
any_worse = any(
[pp_results['dc_per_class_pp_all'][str(cl)] < pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
if not any_worse:
pp_results['for_which_classes'].append(classes)
if min_size_kept is not None:
pp_results['min_valid_object_sizes'].update(deepcopy(min_size_kept))
do_fg_cc = True
print("Removing all but the largest foreground region improved results!")
print('for_which_classes', classes)
print('min_valid_object_sizes', min_size_kept)
else:
# did not improve things - don't do it
pass
if len(classes) > 1:
# now depending on whether we do remove all but the largest foreground connected component we define the source dir
# for the next one to be the raw or the temp dir
if do_fg_cc:
source = folder_all_classes_as_fg
else:
source = join(base, raw_subfolder_name)
if advanced_postprocessing:
# now run this for each class separately
results = []
for f in fnames:
predicted_segmentation = join(source, f)
output_file = join(folder_per_class, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes),)))
results = [i.get() for i in results]
# aggregate max_size_removed and min_size_kept
max_size_removed = {}
min_size_kept = {}
for tmp in results:
mx_rem, min_kept = tmp[0]
for k in mx_rem:
if mx_rem[k] is not None:
if max_size_removed.get(k) is None:
max_size_removed[k] = mx_rem[k]
else:
max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
for k in min_kept:
if min_kept[k] is not None:
if min_size_kept.get(k) is None:
min_size_kept[k] = min_kept[k]
else:
min_size_kept[k] = min(min_size_kept[k], min_kept[k])
print("classes treated separately, smallest valid object sizes are")
print(min_size_kept)
print("removing only objects smaller than that")
else:
min_size_kept = None
# rerun with the size thresholds from above
pred_gt_tuples = []
results = []
for f in fnames:
predicted_segmentation = join(source, f)
output_file = join(folder_per_class, f)
results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes, min_size_kept),)))
pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(folder_per_class, "summary.json"),
json_author="Fabian", num_threads=processes)
if do_fg_cc:
old_res = deepcopy(validation_result_PP_test)
else:
old_res = validation_result_raw
# these are the new dice scores
validation_result_PP_test = load_json(join(folder_per_class, "summary.json"))['results']['mean']
for c in classes:
dc_raw = old_res[str(c)]['Dice']
dc_pp = validation_result_PP_test[str(c)]['Dice']
pp_results['dc_per_class_pp_per_class'][str(c)] = dc_pp
print(c)
print("before:", dc_raw)
print("after: ", dc_pp)
if dc_pp > (dc_raw + dice_threshold):
pp_results['for_which_classes'].append(int(c))
if min_size_kept is not None:
pp_results['min_valid_object_sizes'].update({c: min_size_kept[c]})
print("Removing all but the largest region for class %d improved results!" % c)
print('min_valid_object_sizes', min_size_kept)
else:
print("Only one class present, no need to do each class separately as this is covered in fg vs bg")
if not advanced_postprocessing:
pp_results['min_valid_object_sizes'] = None
print("done")
print("for which classes:")
print(pp_results['for_which_classes'])
print("min_object_sizes")
print(pp_results['min_valid_object_sizes'])
pp_results['validation_raw'] = raw_subfolder_name
pp_results['validation_final'] = final_subf_name
# now that we have a proper for_which_classes, apply that
pred_gt_tuples = []
results = []
for f in fnames:
predicted_segmentation = join(base, raw_subfolder_name, f)
# now remove all but the largest connected component for each class
output_file = join(base, final_subf_name, f)
results.append(p.starmap_async(load_remove_save, (
(predicted_segmentation, output_file, pp_results['for_which_classes'],
pp_results['min_valid_object_sizes']),)))
pred_gt_tuples.append([output_file,
join(gt_labels_folder, f)])
_ = [i.get() for i in results]
# evaluate postprocessed predictions
_ = aggregate_scores(pred_gt_tuples, labels=classes,
json_output_file=join(base, final_subf_name, "summary.json"),
json_author="Fabian", num_threads=processes)
pp_results['min_valid_object_sizes'] = str(pp_results['min_valid_object_sizes'])
save_json(pp_results, join(base, pp_filename))
# delete temp
if not debug:
shutil.rmtree(folder_per_class)
shutil.rmtree(folder_all_classes_as_fg)
p.close()
p.join()
print("done")
def apply_postprocessing_to_folder(input_folder: str, output_folder: str, for_which_classes: list,
min_valid_object_size:dict=None, num_processes=8):
"""
applies removing of all but the largest connected component to all niftis in a folder
:param min_valid_object_size:
:param min_valid_object_size:
:param input_folder:
:param output_folder:
:param for_which_classes:
:param num_processes:
:return:
"""
maybe_mkdir_p(output_folder)
p = Pool(num_processes)
nii_files = subfiles(input_folder, suffix=".nii.gz", join=False)
input_files = [join(input_folder, i) for i in nii_files]
out_files = [join(output_folder, i) for i in nii_files]
results = p.starmap_async(load_remove_save, zip(input_files, out_files, [for_which_classes] * len(input_files),
[min_valid_object_size] * len(input_files)))
res = results.get()
p.close()
p.join()
if __name__ == "__main__":
input_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor"
output_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor_postprocessed"
for_which_classes = [(1, 2), ]
apply_postprocessing_to_folder(input_folder, output_folder, for_which_classes)
avg_line_length: 45.571096 | max_line_length: 128 | alphanum_fraction: 0.628184
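A self-contained sketch of the core idea in remove_all_but_the_largest_connected_component above, using scipy.ndimage.label on a toy binary mask. It omits the volume weighting, per-class handling and minimum-size logic, so it is only an illustration:

import numpy as np
from scipy.ndimage import label

def keep_largest_component(mask):
    # Label connected components, then keep only the largest one.
    lmap, num_objects = label(mask.astype(int))
    if num_objects == 0:
        return mask
    sizes = {obj_id: (lmap == obj_id).sum() for obj_id in range(1, num_objects + 1)}
    largest = max(sizes, key=sizes.get)
    return lmap == largest

toy = np.array([[1, 1, 0, 0, 1],
                [1, 0, 0, 0, 1],
                [0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0]])
print(keep_largest_component(toy).astype(int))  # only the 3-voxel top-left blob survives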
hexsha: 07af434a860b58de1e3322111fb1896e565d344a | size: 1,846 | ext: py | lang: Python
max_stars: path=calc/migrations/0004_speedups.py | repo=ifireball/sos-calc | head=9941c543534306c77fe0f08be55af68d5514a2c8 | licenses=["MIT"] | count=null | events: null
max_issues: path=calc/migrations/0004_speedups.py | repo=ifireball/sos-calc | head=9941c543534306c77fe0f08be55af68d5514a2c8 | licenses=["MIT"] | count=null | events: null
max_forks: path=calc/migrations/0004_speedups.py | repo=ifireball/sos-calc | head=9941c543534306c77fe0f08be55af68d5514a2c8 | licenses=["MIT"] | count=1 | events: 2021-05-19T14:10:21.000Z to 2021-05-19T14:10:21.000Z
content:
# Generated by Django 3.1.5 on 2021-01-08 07:11

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('calc', '0003_auto_20210106_1104'),
    ]

    operations = [
        migrations.CreateModel(
            name='Speedups',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('day', models.DateField()),
                ('training_1m', models.IntegerField(default=0)),
                ('training_5m', models.IntegerField(default=0)),
                ('training_1h', models.IntegerField(default=0)),
                ('healing_1m', models.IntegerField(default=0)),
                ('healing_5m', models.IntegerField(default=0)),
                ('healing_1h', models.IntegerField(default=0)),
                ('construction_1m', models.IntegerField(default=0)),
                ('construction_5m', models.IntegerField(default=0)),
                ('construction_1h', models.IntegerField(default=0)),
                ('research_1m', models.IntegerField(default=0)),
                ('research_5m', models.IntegerField(default=0)),
                ('research_1h', models.IntegerField(default=0)),
                ('generic_1m', models.IntegerField(default=0)),
                ('generic_5m', models.IntegerField(default=0)),
                ('generic_1h', models.IntegerField(default=0)),
                ('generic_3h', models.IntegerField(default=0)),
                ('generic_8h', models.IntegerField(default=0)),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calc.account')),
            ],
            options={
                'unique_together': {('account', 'day')},
            },
        ),
    ]
avg_line_length: 42.930233 | max_line_length: 114 | alphanum_fraction: 0.568797
hexsha: 1efb86e66f365dd2e24bbbfb830dea37bdab7e4c | size: 6,260 | ext: py | lang: Python
max_stars: path=autolens/pipeline/phase/dataset/meta_dataset.py | repo=PyJedi/PyAutoLens | head=bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | licenses=["MIT"] | count=null | events: null
max_issues: path=autolens/pipeline/phase/dataset/meta_dataset.py | repo=PyJedi/PyAutoLens | head=bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | licenses=["MIT"] | count=null | events: null
max_forks: path=autolens/pipeline/phase/dataset/meta_dataset.py | repo=PyJedi/PyAutoLens | head=bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | licenses=["MIT"] | count=null | events: null
content:
import autofit as af
import autoarray as aa
from autolens import exc
from autolens.fit import fit
from autoarray.operators.inversion import pixelizations as pix
import numpy as np
def isprior(obj):
if isinstance(obj, af.PriorModel):
return True
return False
def isinstance_or_prior(obj, cls):
if isinstance(obj, cls):
return True
if isinstance(obj, af.PriorModel) and obj.cls == cls:
return True
return False
class MetaDataset:
def __init__(
self,
model,
sub_size=2,
signal_to_noise_limit=None,
auto_positions_factor=None,
positions_threshold=None,
pixel_scale_interpolation_grid=None,
inversion_uses_border=True,
inversion_pixel_limit=None,
is_hyper_phase=False,
):
self.is_hyper_phase = is_hyper_phase
self.model = model
self.sub_size = sub_size
self.signal_to_noise_limit = signal_to_noise_limit
self.auto_positions_factor = auto_positions_factor
self.positions_threshold = positions_threshold
self.pixel_scale_interpolation_grid = pixel_scale_interpolation_grid
self.inversion_uses_border = inversion_uses_border
self.inversion_pixel_limit = (
inversion_pixel_limit
or af.conf.instance.general.get(
"inversion", "inversion_pixel_limit_overall", int
)
)
def mask_with_phase_sub_size_from_mask(self, mask):
if mask.sub_size != self.sub_size:
mask = aa.Mask.manual(
mask_2d=mask,
pixel_scales=mask.pixel_scales,
sub_size=self.sub_size,
origin=mask.origin,
)
return mask
def updated_positions_from_positions_and_results(self, positions, results):
"""If automatic position updating is on, update the phase's positions using the results of the previous phase's
lens model, by ray-tracing backwards the best-fit source centre(s) to the image-plane.
The outcome of this function are as follows:
1) If auto positioning is off (self.auto_positions_factor is None), use the previous phase's positions.
2) If auto positioning is on (self.auto_positions_factor not None) use positions based on the previous phase's
best-fit tracer. However, if this tracer gives 1 or less positions, use the previous positions.
3) If auto positioning is on or off and there is no previous phase, use the input positions.
"""
if self.auto_positions_factor is not None and results.last is not None:
updated_positions = (
results.last.image_plane_multiple_image_positions_of_source_plane_centres
)
            # TODO: Coordinates refactor will sort out the index call here
if updated_positions:
if len(updated_positions[0]) > 1:
return updated_positions
if results.last is not None:
if results.last.positions and results.last.positions is not None:
return results.last.positions
return positions
def updated_positions_threshold_from_positions(self, positions, results) -> [float]:
"""
If automatic position updating is on, update the phase's threshold using this phase's updated positions.
First, we ray-trace forward the positions of the source-plane centres (see above) via the mass model to
        determine how far apart they are separated. This gives us their source-plane separation, which is multiplied by
self.auto_positions_factor to set the threshold."""
if self.auto_positions_factor and results.last is not None:
if positions is None:
return None
positions_fits = fit.FitPositions(
positions=aa.Coordinates(coordinates=positions),
tracer=results.last.most_likely_tracer,
noise_map=1.0,
)
return self.auto_positions_factor * np.max(
positions_fits.maximum_separations
)
else:
return self.positions_threshold
def check_positions(self, positions):
if self.positions_threshold is not None and positions is None:
raise exc.PhaseException(
"You have specified for a phase to use positions, but not input positions to the "
"pipeline when you ran it."
)
@property
def pixelization(self):
for galaxy in self.model.galaxies:
if hasattr(galaxy, "pixelization"):
if galaxy.pixelization is not None:
if isinstance(galaxy.pixelization, af.PriorModel):
return galaxy.pixelization.cls
else:
return galaxy.pixelization
@property
def has_pixelization(self):
if self.pixelization is not None:
return True
else:
return False
@property
def uses_cluster_inversion(self):
if self.model.galaxies:
for galaxy in self.model.galaxies:
if isinstance_or_prior(galaxy.pixelization, pix.VoronoiBrightnessImage):
return True
return False
@property
def pixelizaition_is_model(self):
if self.model.galaxies:
for galaxy in self.model.galaxies:
if isprior(galaxy.pixelization):
return True
return False
def preload_pixelization_grids_of_planes_from_results(self, results):
if self.is_hyper_phase:
return None
if (
results.last is not None
and self.pixelization is not None
and not self.pixelizaition_is_model
):
if self.pixelization.__class__ is results.last.pixelization.__class__:
if hasattr(results.last, "hyper_combined"):
return (
results.last.hyper_combined.most_likely_pixelization_grids_of_planes
)
else:
return results.last.most_likely_pixelization_grids_of_planes
return None
avg_line_length: 34.777778 | max_line_length: 119 | alphanum_fraction: 0.633227
hexsha: 8f729045223d49c94883e46fa1a31a3eb4b4fdce | size: 114 | ext: py | lang: Python
max_stars: path=grokking_algorithms/04_quicksort/python/sum_recursion.py | repo=youaresherlock/PythonPractice | head=2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | licenses=["Apache-2.0"] | count=null | events: null
max_issues: path=grokking_algorithms/04_quicksort/python/sum_recursion.py | repo=youaresherlock/PythonPractice | head=2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | licenses=["Apache-2.0"] | count=null | events: null
max_forks: path=grokking_algorithms/04_quicksort/python/sum_recursion.py | repo=youaresherlock/PythonPractice | head=2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | licenses=["Apache-2.0"] | count=1 | events: 2019-11-05T01:10:15.000Z to 2019-11-05T01:10:15.000Z
content:
# -*- coding: utf-8 -*-
# Sum of the elements of a list, written recursively
def sum(list):
    if list == []:
        return 0
    return list[0] + sum(list[1:])
avg_line_length: 16.285714 | max_line_length: 31 | alphanum_fraction: 0.54386
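A quick check of the recursive sum above; note that it shadows the built-in sum and recurses once per element, so very long lists would hit Python's recursion limit:

print(sum([1, 2, 3, 4]))  # 10
print(sum([]))            # 0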
hexsha: 41c0336a5368610e436642dad8a47d0f8c85364a | size: 1,158 | ext: py | lang: Python
max_stars: path=Ejemplos/28.py | repo=ampotty/uip-pc3 | head=8362680226df6629791e7a4c6cdf1b738eadc5de | licenses=["MIT"] | count=10 | events: 2015-10-27T18:29:06.000Z to 2019-04-03T04:05:31.000Z
max_issues: path=Ejemplos/28.py | repo=abdelgmartinezl/uip-pc3 | head=8362680226df6629791e7a4c6cdf1b738eadc5de | licenses=["MIT"] | count=5 | events: 2015-10-13T01:12:51.000Z to 2016-10-08T18:01:17.000Z
max_forks: path=Ejemplos/28.py | repo=ampotty/uip-pc3 | head=8362680226df6629791e7a4c6cdf1b738eadc5de | licenses=["MIT"] | count=25 | events: 2015-09-19T00:40:17.000Z to 2018-02-08T02:54:55.000Z
content:
"""
Ex. 28
Author: Zahir Gudiño
Email: zahir.gudino@gmail.com
Description:
Demonstrates simple use of the single-threaded event loop commonly employed in asynchronous programming. Keep in mind
this is a very contrived example, meant only to demonstrate the common pattern. asyncio offers many more features
--e.g. parallel execution, coroutine cancellation, etc.-- that belong to other topics.
See (https://docs.python.org/3.6/library/asyncio.html#module-asyncio).
"""
import asyncio


async def ticker(delay, to):
    """Yield numbers in the range zero to `to`, one every `delay` seconds"""
    for i in range(to):
        yield i
        await asyncio.sleep(delay)


async def main():
    my_list = []
    print("Async Loop (running...)")
    # Iterate over each yielded value asynchronously...
    async for x in ticker(1, 5):
        my_list.append(x)
    print(my_list, "\n")

    print("Async Comprehensions List (running...)")
    # Another way, via an async comprehension
    print([x async for x in ticker(1, 5)], "\n")


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
avg_line_length: 27.571429 | max_line_length: 118 | alphanum_fraction: 0.678756
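On Python 3.7+ the manual loop management at the bottom of the example can be replaced by asyncio.run, which creates, runs and closes the event loop in one call; a minimal equivalent sketch:

import asyncio

async def ticker(delay, to):
    for i in range(to):
        yield i
        await asyncio.sleep(delay)

async def main():
    print([x async for x in ticker(0.1, 5)])

if __name__ == "__main__":
    asyncio.run(main())  # handles loop creation, execution and cleanup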
hexsha: f2e8216f7f4a7872ea09034e6a6793dde120bb1c | size: 4,015 | ext: py | lang: Python
max_stars: path=llvm/utils/lit/tests/shtest-env.py | repo=mkinsner/llvm | head=589d48844edb12cd357b3024248b93d64b6760bf | licenses=["Apache-2.0"] | count=2,338 | events: 2018-06-19T17:34:51.000Z to 2022-03-31T11:00:37.000Z
max_issues: path=llvm/utils/lit/tests/shtest-env.py | repo=mkinsner/llvm | head=589d48844edb12cd357b3024248b93d64b6760bf | licenses=["Apache-2.0"] | count=3,740 | events: 2019-01-23T15:36:48.000Z to 2022-03-31T22:01:13.000Z
max_forks: path=llvm/utils/lit/tests/shtest-env.py | repo=mkinsner/llvm | head=589d48844edb12cd357b3024248b93d64b6760bf | licenses=["Apache-2.0"] | count=500 | events: 2019-01-23T07:49:22.000Z to 2022-03-30T02:59:37.000Z
content:
# Check the env command
# RUN: not %{lit} -a -v %{inputs}/shtest-env \
# RUN: | FileCheck -match-full-lines %s
#
# END.
# Make sure env commands are included in printed commands.
# CHECK: -- Testing: 16 tests{{.*}}
# CHECK: FAIL: shtest-env :: env-args-last-is-assign.txt ({{[^)]*}})
# CHECK: $ "env" "FOO=1"
# CHECK: Error: 'env' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-args-last-is-u-arg.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO"
# CHECK: Error: 'env' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-args-last-is-u.txt ({{[^)]*}})
# CHECK: $ "env" "-u"
# CHECK: Error: 'env' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-args-nested-none.txt ({{[^)]*}})
# CHECK: $ "env" "env" "env"
# CHECK: Error: 'env' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-args-none.txt ({{[^)]*}})
# CHECK: $ "env"
# CHECK: Error: 'env' requires a subcommand
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-cd.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "cd" "foobar"
# CHECK: Error: 'env' cannot call 'cd'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-colon.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" ":"
# CHECK: Error: 'env' cannot call ':'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-echo.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "echo" "hello" "world"
# CHECK: Error: 'env' cannot call 'echo'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: PASS: shtest-env :: env-calls-env.txt ({{[^)]*}})
# CHECK: $ "env" "env" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "FOO=2" "env" "BAR=1" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "-u" "FOO" "env" "-u" "BAR" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "-u" "FOO" "BAR=1" "env" "-u" "BAR" "FOO=2" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "-u" "FOO" "BAR=1" "env" "-u" "BAR" "FOO=2" "env" "BAZ=3" "{{[^"]*}}" "print_environment.py"
# CHECK-NOT: ${{.*}}print_environment.py
# CHECK: FAIL: shtest-env :: env-calls-export.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "export" "BAZ=3"
# CHECK: Error: 'env' cannot call 'export'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-mkdir.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "mkdir" "foobar"
# CHECK: Error: 'env' cannot call 'mkdir'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-not-builtin.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "not" "rm" "{{.*}}.no-such-file"
# CHECK: Error: 'env' cannot call 'rm'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: FAIL: shtest-env :: env-calls-rm.txt ({{[^)]*}})
# CHECK: $ "env" "-u" "FOO" "BAR=3" "rm" "foobar"
# CHECK: Error: 'env' cannot call 'rm'
# CHECK: error: command failed with exit status: {{.*}}
# CHECK: PASS: shtest-env :: env-u.txt ({{[^)]*}})
# CHECK: $ "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "-u" "FOO" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "-u" "FOO" "-u" "BAR" "{{[^"]*}}" "print_environment.py"
# CHECK-NOT: ${{.*}}print_environment.py
# CHECK: PASS: shtest-env :: env.txt ({{[^)]*}})
# CHECK: $ "env" "A_FOO=999" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "A_FOO=1" "B_BAR=2" "C_OOF=3" "{{[^"]*}}" "print_environment.py"
# CHECK-NOT: ${{.*}}print_environment.py
# CHECK: PASS: shtest-env :: mixed.txt ({{[^)]*}})
# CHECK: $ "env" "A_FOO=999" "-u" "FOO" "{{[^"]*}}" "print_environment.py"
# CHECK: $ "env" "A_FOO=1" "-u" "FOO" "B_BAR=2" "-u" "BAR" "C_OOF=3" "{{[^"]*}}" "print_environment.py"
# CHECK-NOT: ${{.*}}print_environment.py
# CHECK: Passed: 4
# CHECK: Failed: 12
# CHECK-NOT: {{.}}
avg_line_length: 40.555556 | max_line_length: 109 | alphanum_fraction: 0.571357
hexsha: 88b8ab05075cf7babe213b3abfed3d14822b7cfc | size: 5,266 | ext: py | lang: Python
max_stars: path=sdk/netapp/azure-mgmt-netapp/tests/test_backup_policies.py | repo=vincenttran-msft/azure-sdk-for-python | head=348b56f9f03eeb3f7b502eed51daf494ffff874d | licenses=["MIT"] | count=1 | events: 2022-02-01T18:50:12.000Z to 2022-02-01T18:50:12.000Z
max_issues: path=sdk/netapp/azure-mgmt-netapp/tests/test_backup_policies.py | repo=vincenttran-msft/azure-sdk-for-python | head=348b56f9f03eeb3f7b502eed51daf494ffff874d | licenses=["MIT"] | count=null | events: null
max_forks: path=sdk/netapp/azure-mgmt-netapp/tests/test_backup_policies.py | repo=vincenttran-msft/azure-sdk-for-python | head=348b56f9f03eeb3f7b502eed51daf494ffff874d | licenses=["MIT"] | count=null | events: null
content:
import time
from azure.mgmt.resource import ResourceManagementClient
from devtools_testutils import AzureMgmtRecordedTestCase, recorded_by_proxy
from azure.mgmt.netapp.models import BackupPolicy, BackupPolicyPatch
from test_account import create_account, delete_account
from setup import *
import azure.mgmt.netapp.models
TEST_BACKUP_POLICY_1='sdk-py-tests-backup-policy-1'
TEST_BACKUP_POLICY_2='sdk-py-tests-backup-policy-2'
BACKUP_POLICIES = [TEST_BACKUP_POLICY_1, TEST_BACKUP_POLICY_2]
def create_backup_policy(client, backup_policy_name, rg=TEST_RG, account_name=TEST_ACC_1, location=LOCATION, backup_policy_only=False):
if not backup_policy_only:
create_account(client, rg, account_name, location)
backup_policy_body = BackupPolicy(
location=location,
daily_backups_to_keep=1,
weekly_backups_to_keep=0,
monthly_backups_to_keep=0,
enabled=True
)
backup_policy = client.backup_policies.begin_create(rg, account_name, backup_policy_name, backup_policy_body).result()
return backup_policy
def delete_backup_policy(client, backup_policy_name, rg=TEST_RG, account_name=TEST_ACC_1, live=False):
client.backup_policies.begin_delete(rg, account_name, backup_policy_name).wait()
wait_for_no_backup_policy(client, rg, account_name, backup_policy_name, live)
def wait_for_no_backup_policy(client, rg, account_name, backup_policy_name, live=False):
# a workaround for the async nature of certain ARM processes
co = 0
while co < 5:
co += 1
if live:
time.sleep(2)
try:
client.backup_policies.get(rg, account_name, backup_policy_name)
except:
# not found is an exception case (status code 200 expected)
# and is actually what we are waiting for
break
def wait_for_backup_policy_state(client, desired_state, rg=TEST_RG, account_name=TEST_ACC_1,
backup_policy_name=TEST_BACKUP_POLICY_1, live=False):
co = 0
while co < 5:
co += 1
policy = client.backup_policies.get(rg, account_name, backup_policy_name)
if policy.provisioning_state == desired_state:
break
if live:
time.sleep(5)
class TestNetAppBackupPolicies(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.client = self.create_mgmt_client(azure.mgmt.netapp.NetAppManagementClient)
# Before tests are run live a resource group needs to be created along with vnet and subnet
# Note that when tests are run in live mode it is best to run one test at a time.
@recorded_by_proxy
def test_create_delete_backup_policy(self):
create_backup_policy(self.client, TEST_BACKUP_POLICY_1)
backup_policies_list = self.client.backup_policies.list(TEST_RG, TEST_ACC_1)
assert len(list(backup_policies_list)) == 1
delete_backup_policy(self.client, TEST_BACKUP_POLICY_1, live=self.is_live)
backup_policies_list = self.client.backup_policies.list(TEST_RG, TEST_ACC_1)
assert len(list(backup_policies_list)) == 0
delete_account(self.client, TEST_RG, TEST_ACC_1, live=self.is_live)
@recorded_by_proxy
def test_list_backup_policies(self):
create_backup_policy(self.client, TEST_BACKUP_POLICY_1)
create_backup_policy(self.client, TEST_BACKUP_POLICY_2, backup_policy_only=True)
backup_policies_list = self.client.backup_policies.list(TEST_RG, TEST_ACC_1)
assert len(list(backup_policies_list)) == 2
idx = 0
for backup_policy in backup_policies_list:
assert backup_policy.name == BACKUP_POLICIES[idx]
idx += 1
delete_backup_policy(self.client, TEST_BACKUP_POLICY_1, live=self.is_live)
delete_backup_policy(self.client, TEST_BACKUP_POLICY_2, live=self.is_live)
backup_policies_list = self.client.backup_policies.list(TEST_RG, TEST_ACC_1)
assert len(list(backup_policies_list)) == 0
delete_account(self.client, TEST_RG, TEST_ACC_1)
@recorded_by_proxy
def test_get_backup_policy_by_name(self):
create_backup_policy(self.client, TEST_BACKUP_POLICY_1)
backup_policy = self.client.backup_policies.get(TEST_RG, TEST_ACC_1, TEST_BACKUP_POLICY_1)
assert backup_policy.name == TEST_ACC_1 + "/" + TEST_BACKUP_POLICY_1
delete_backup_policy(self.client, TEST_BACKUP_POLICY_1, live=self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1)
@recorded_by_proxy
def test_update_backup_policies(self):
create_backup_policy(self.client, TEST_BACKUP_POLICY_1)
backup_policy_body = BackupPolicyPatch(
location=LOCATION,
daily_backups_to_keep=0,
weekly_backups_to_keep=1,
monthly_backups_to_keep=0,
enabled=True
)
backup_policy = self.client.backup_policies.begin_update(TEST_RG, TEST_ACC_1, TEST_BACKUP_POLICY_1, backup_policy_body).result()
assert backup_policy.daily_backups_to_keep == 0
assert backup_policy.weekly_backups_to_keep == 1
delete_backup_policy(self.client, TEST_BACKUP_POLICY_1, live=self.is_live)
delete_account(self.client, TEST_RG, TEST_ACC_1)
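# --- Editor's note: hedged addition, not part of the original test module. ---
# The wait_for_* helpers above share one bounded-polling pattern; this stdlib-only
# sketch expresses it generically. `check`, `attempts` and `delay` are illustrative
# names, not part of the azure-mgmt-netapp API.
def poll_until(check, attempts=5, delay=2.0, live=True):
    """Call check() up to `attempts` times, sleeping between attempts in live runs."""
    for _ in range(attempts):
        if check():
            return True
        if live:  # recordings replay instantly, so only sleep when running live
            time.sleep(delay)
    return False

# Example: poll_until(lambda: is_policy_gone(client), live=True)  # is_policy_gone is hypothetical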
| 40.821705
| 136
| 0.731485
|
1dcc8f6b606fe2a9ac23e1af40d487d87d02fb15
| 2,799
|
py
|
Python
|
main.py
|
Alaa-MK/-Verilog-Netlist-Enhancer
|
a3e50b6cc008a16fa75ce1dbe02cf1c0388e4e52
|
[
"MIT"
] | null | null | null |
main.py
|
Alaa-MK/-Verilog-Netlist-Enhancer
|
a3e50b6cc008a16fa75ce1dbe02cf1c0388e4e52
|
[
"MIT"
] | null | null | null |
main.py
|
Alaa-MK/-Verilog-Netlist-Enhancer
|
a3e50b6cc008a16fa75ce1dbe02cf1c0388e4e52
|
[
"MIT"
] | null | null | null |
from Netlist import Netlist
#from Liberty import Liberty
import matplotlib.pyplot as plt
import networkx as nx
import sys, getopt
import os
def main(argv):
# vfile=input('please enter the name of the v file: ')
# libfile = input('please enter the name of the liberty file: ')
vfile='examples/new/uart_synth.rtl_new.v'
libfile = 'examples/osu035.lib'
while not (os.path.isfile(vfile)) or not (os.path.isfile(libfile)):
print("file not exist. Please, re-enter")
vfile=input('please enter the name of the v file: ')
libfile = input('please enter the name of the liberty file: ')
netlist = Netlist(vfile, libfile)
choice = ''
while True:
choice = input('''
==================================================================================
Please choose an option of the following:
- delay: report the maximum delay of the circuit
- n-cells: report the total number of cells in the circuit
- fanout: report the max fanout of the circuit
- buffer: satisfy the max fanout constraint using buffering
- clone: try to satisfy the max fanout constraint using cloning
- size: do greedy sizing algorithm to decrease the delay of the critical path
- graph: visualize the circuit as a graph
- netlist: print the current verilog netlist
- quit: quit the program
=================================================================================
> ''')
c = choice.lower()
if c=='delay':
print(netlist.report_max_delay())
# g = netlist.get_graph()
# for e in g.edges():
# print(g.get_edge_data(e[0], e[1]))
elif c=='n-cells':
netlist.report_no_of_cells_of_each_type()
elif c=='fanout':
print(netlist.max_fanout())
elif c=='buffer':
fo = input("please enter the desired max fanout: ")
netlist.buffer_all(int(fo))
print('Max Fanout: ', netlist.max_fanout())
elif c=='clone':
fo = input("please enter the desired max fanout: ")
netlist.clone_all(int(fo))
print('Max Fanout: ', netlist.max_fanout())
elif c=='size':
delay = input("please enter the desired max delay: ")
print('Delay Before Sizing: ', netlist.report_max_delay())
netlist.sizing_up(float(delay))
print('Delay After Sizing: ', netlist.report_max_delay())
elif c=='graph':
nx.draw(netlist.get_graph(),with_labels = True)
plt.show()
elif c=='netlist':
print(netlist.to_v_netlist())
elif c=='quit':
return
else:
print('Please choose a valid option.')
if __name__ == "__main__":
main(sys.argv[1:])
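# --- Editor's note: hedged sketch, not part of the original script. ---
# The long if/elif chain above can also be written as a dispatch table; the
# handlers below only print, so the sketch runs without the Netlist class.
def _demo_dispatch(choice):
    handlers = {
        'delay': lambda: print('would report the maximum delay'),
        'fanout': lambda: print('would report the maximum fanout'),
        'quit': lambda: print('quitting'),
    }
    handlers.get(choice.lower(), lambda: print('Please choose a valid option.'))()

# _demo_dispatch('delay')   -> would report the maximum delay
# _demo_dispatch('bogus')   -> Please choose a valid option.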
| 35.884615
| 82
| 0.570918
|
914bc080e805eac826641f7cc8b60c4e1e2ea7a9
| 9,111
|
py
|
Python
|
test/color/test_hsv.py
|
pworinger/kornia
|
a8bddbc5412694d778b1a7338e0d001910bb8024
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-06-25T18:10:22.000Z
|
2021-06-25T18:10:22.000Z
|
test/color/test_hsv.py
|
pworinger/kornia
|
a8bddbc5412694d778b1a7338e0d001910bb8024
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/color/test_hsv.py
|
pworinger/kornia
|
a8bddbc5412694d778b1a7338e0d001910bb8024
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-15T03:22:24.000Z
|
2021-05-15T03:22:24.000Z
|
import pytest
import math
import kornia
from kornia.testing import BaseTester
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
class TestRgbToHsv(BaseTester):
def test_smoke(self, device, dtype):
C, H, W = 3, 4, 5
img = torch.rand(C, H, W, device=device, dtype=dtype)
assert isinstance(kornia.color.rgb_to_hsv(img), torch.Tensor)
@pytest.mark.parametrize(
"shape", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 1)])
def test_cardinality(self, device, dtype, shape):
img = torch.ones(shape, device=device, dtype=dtype)
assert kornia.color.rgb_to_hsv(img).shape == shape
def test_exception(self, device, dtype):
with pytest.raises(TypeError):
assert kornia.color.rgb_to_hsv([0.])
with pytest.raises(ValueError):
img = torch.ones(1, 1, device=device, dtype=dtype)
assert kornia.color.rgb_to_hsv(img)
with pytest.raises(ValueError):
img = torch.ones(2, 1, 1, device=device, dtype=dtype)
assert kornia.color.rgb_to_hsv(img)
def test_unit(self, device, dtype):
data = torch.tensor([
[[0.3944633, 0.8597369, 0.1670904, 0.2825457, 0.0953912],
[0.1251704, 0.8020709, 0.8933256, 0.9170977, 0.1497008],
[0.2711633, 0.1111478, 0.0783281, 0.2771807, 0.5487481],
[0.0086008, 0.8288748, 0.9647092, 0.8922020, 0.7614344],
[0.2898048, 0.1282895, 0.7621747, 0.5657831, 0.9918593]],
[[0.5414237, 0.9962701, 0.8947155, 0.5900949, 0.9483274],
[0.0468036, 0.3933847, 0.8046577, 0.3640994, 0.0632100],
[0.6171775, 0.8624780, 0.4126036, 0.7600935, 0.7279997],
[0.4237089, 0.5365476, 0.5591233, 0.1523191, 0.1382165],
[0.8932794, 0.8517839, 0.7152701, 0.8983801, 0.5905426]],
[[0.2869580, 0.4700376, 0.2743714, 0.8135023, 0.2229074],
[0.9306560, 0.3734594, 0.4566821, 0.7599275, 0.7557513],
[0.7415742, 0.6115875, 0.3317572, 0.0379378, 0.1315770],
[0.8692724, 0.0809556, 0.7767404, 0.8742208, 0.1522012],
[0.7708948, 0.4509611, 0.0481175, 0.2358997, 0.6900532]]
], device=device, dtype=dtype)
# OpenCV
expected = torch.tensor([
[[1.6519808, 1.31889750, 2.24879380, 3.58221600, 2.25095400],
[4.2816400, 0.04868213, 0.83454597, 5.53361700, 4.31957400],
[3.4185164, 2.79190370, 2.88832240, 1.74746920, 1.36192720],
[3.6837196, 0.63789610, 5.72131160, 5.26143740, 6.25968700],
[2.9292210, 2.56143520, 0.97840965, 1.57294110, 6.02352240]],
[[0.4699935, 0.52820253, 0.81324730, 0.65267974, 0.89941100],
[0.9497089, 0.53438100, 0.48878422, 0.60298723, 0.91636120],
[0.6343409, 0.87112963, 0.81016120, 0.95008780, 0.81926220],
[0.9901055, 0.90233060, 0.42042294, 0.82927720, 0.81847864],
[0.6755719, 0.84938710, 0.93686795, 0.73741645, 0.40461043]],
[[0.5414237, 0.99627006, 0.89471555, 0.81350225, 0.94832740],
[0.9306560, 0.80207086, 0.89332560, 0.91709770, 0.75575125],
[0.7415741, 0.86247796, 0.41260356, 0.76009345, 0.72799970],
[0.8692723, 0.82887480, 0.96470920, 0.89220200, 0.76143440],
[0.8932794, 0.85178390, 0.76217470, 0.89838010, 0.99185926]]
], device=device, dtype=dtype)
assert_allclose(kornia.color.rgb_to_hsv(data), expected)
def test_nan_rgb_to_hsv(self, device, dtype):
data = torch.zeros(1, 5, 5, device=device, dtype=dtype) # 3x5x5
data = data.repeat(3, 1, 1) # 2x3x5x5
expected = torch.zeros_like(data) # 3x5x5
assert_allclose(kornia.color.rgb_to_hsv(data), expected)
@pytest.mark.grad
def test_gradcheck(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.rand(B, C, H, W, device=device, dtype=torch.float64, requires_grad=True)
assert gradcheck(kornia.color.rgb_to_hsv, (img,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.ones(B, C, H, W, device=device, dtype=dtype)
op = kornia.color.rgb_to_hsv
op_jit = torch.jit.script(op)
assert_allclose(op(img), op_jit(img))
@pytest.mark.nn
def test_module(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.ones(B, C, H, W, device=device, dtype=dtype)
ops = kornia.color.RgbToHsv().to(device, dtype)
fcn = kornia.color.rgb_to_hsv
assert_allclose(ops(img), fcn(img))
class TestHsvToRgb(BaseTester):
def test_smoke(self, device, dtype):
C, H, W = 3, 4, 5
img = torch.rand(C, H, W, device=device, dtype=dtype)
assert isinstance(kornia.color.hsv_to_rgb(img), torch.Tensor)
@pytest.mark.parametrize(
"shape", [(1, 3, 4, 4), (2, 3, 2, 4), (3, 3, 4, 1), (3, 2, 1)])
def test_cardinality(self, device, dtype, shape):
img = torch.ones(shape, device=device, dtype=dtype)
assert kornia.color.hsv_to_rgb(img).shape == shape
def test_exception(self, device, dtype):
with pytest.raises(TypeError):
assert kornia.color.hsv_to_rgb([0.])
with pytest.raises(ValueError):
img = torch.ones(1, 1, device=device, dtype=dtype)
assert kornia.color.hsv_to_rgb(img)
with pytest.raises(ValueError):
img = torch.ones(2, 1, 1, device=device, dtype=dtype)
assert kornia.color.hsv_to_rgb(img)
def test_unit(self, device, dtype):
data = torch.tensor([[
[[3.5433271, 5.6390061, 1.3766849, 2.5384088, 4.6848912],
[5.7209363, 5.3262630, 6.2059994, 4.1164689, 2.3872600],
[0.6370091, 3.6186798, 5.9170871, 2.8275447, 5.4289737],
[0.2751994, 1.6632686, 1.0049511, 0.7046204, 1.3791083],
[0.7863123, 4.4852505, 4.3064494, 2.5573561, 5.9083076]],
[[0.5026655, 0.9453601, 0.5929778, 0.2632897, 0.4590443],
[0.6201433, 0.5610679, 0.9653260, 0.0830478, 0.5000827],
[0.6067343, 0.6422323, 0.6777940, 0.7705711, 0.6050767],
[0.5495264, 0.5573426, 0.4683768, 0.2268902, 0.2116482],
[0.6525245, 0.0022379, 0.4909980, 0.1682271, 0.6327152]],
[[0.8471680, 0.9302199, 0.3265766, 0.7944570, 0.7038843],
[0.4833369, 0.2088473, 0.1169234, 0.4966302, 0.6448684],
[0.2713015, 0.5893380, 0.6015301, 0.6801558, 0.2322258],
[0.5704236, 0.6797268, 0.4755683, 0.4811209, 0.5317836],
[0.3236262, 0.0999796, 0.3614958, 0.5117705, 0.8194097]]
]], device=device, dtype=dtype)
# OpenCV
expected = torch.tensor([[
[[0.42132590, 0.93021995, 0.26564622, 0.58528465, 0.53384290],
[0.48333693, 0.20884734, 0.11692339, 0.45538613, 0.32238087],
[0.27130150, 0.21084610, 0.60153013, 0.15604737, 0.23222584],
[0.57042360, 0.45685310, 0.47556830, 0.48112088, 0.49611038],
[0.32362622, 0.09981924, 0.20394461, 0.42567685, 0.81940967]],
[[0.68380290, 0.05082710, 0.32657660, 0.79445700, 0.38077020],
[0.18359877, 0.09166980, 0.00405421, 0.45823452, 0.64486840],
[0.20682439, 0.41690278, 0.19381660, 0.68015575, 0.09171140],
[0.33933756, 0.67972680, 0.46658220, 0.44541004, 0.53178360],
[0.27101707, 0.09975589, 0.18400209, 0.51177055, 0.30095676]],
[[0.84716797, 0.59178180, 0.13292392, 0.67397410, 0.70388430],
[0.34453064, 0.19874583, 0.01237347, 0.49663020, 0.41256943],
[0.10669357, 0.58933800, 0.33635240, 0.52297890, 0.20633064],
[0.25696078, 0.30088606, 0.25282317, 0.37195927, 0.41923255],
[0.11245217, 0.09997964, 0.36149580, 0.46373847, 0.48655340]]
]], device=device, dtype=dtype)
f = kornia.color.hsv_to_rgb
assert_allclose(f(data), expected)
data[:, 0] += 2 * math.pi
assert_allclose(f(data), expected)
data[:, 0] -= 4 * math.pi
assert_allclose(f(data), expected)
@pytest.mark.grad
def test_gradcheck(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.rand(B, C, H, W, device=device, dtype=torch.float64, requires_grad=True)
assert gradcheck(kornia.color.hsv_to_rgb, (img,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.ones(B, C, H, W, device=device, dtype=dtype)
op = kornia.color.hsv_to_rgb
op_jit = torch.jit.script(op)
assert_allclose(op(img), op_jit(img))
@pytest.mark.nn
def test_module(self, device, dtype):
B, C, H, W = 2, 3, 4, 4
img = torch.ones(B, C, H, W, device=device, dtype=dtype)
ops = kornia.color.HsvToRgb().to(device, dtype)
fcn = kornia.color.hsv_to_rgb
assert_allclose(ops(img), fcn(img))
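# --- Editor's note: hedged addition, not part of the kornia test-suite. ---
# Round-trip sanity check built only on the public API exercised above; it assumes
# the same `device`/`dtype` fixtures that the surrounding tests rely on.
class TestHsvRoundTrip:
    def test_round_trip(self, device, dtype):
        img = torch.rand(2, 3, 4, 5, device=device, dtype=dtype)
        out = kornia.color.hsv_to_rgb(kornia.color.rgb_to_hsv(img))
        assert_allclose(out, img, rtol=1e-4, atol=1e-4)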
| 46.015152
| 92
| 0.596532
|
ad11615d80dbe531dc668ca2dee05a16c9f43de4
| 837
|
py
|
Python
|
python/services/config_db_service.py
|
filipemot/sistema_distancia_entre_cidades
|
2798acf2f8b92ed793b203c0ac7c5bf50e008e46
|
[
"MIT"
] | null | null | null |
python/services/config_db_service.py
|
filipemot/sistema_distancia_entre_cidades
|
2798acf2f8b92ed793b203c0ac7c5bf50e008e46
|
[
"MIT"
] | null | null | null |
python/services/config_db_service.py
|
filipemot/sistema_distancia_entre_cidades
|
2798acf2f8b92ed793b203c0ac7c5bf50e008e46
|
[
"MIT"
] | null | null | null |
import logging
import os
import sys
from configparser import ConfigParser
class ConfigDbService:
@staticmethod
    def config(filename=os.path.join(os.path.dirname(sys.modules['__main__'].__file__), 'database.ini'),
               section='postgresql') -> dict:
try:
parser: ConfigParser = ConfigParser()
parser.read(filename)
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
logging.error('Section {0} not found in the {1} file'.format(section, filename))
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
except Exception as e:
raise e
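# --- Editor's note: hedged usage sketch, not part of the service module. ---
# Stdlib-only illustration of the INI layout ConfigDbService.config() expects;
# the temporary path and credentials below are made up for the example.
if __name__ == "__main__":
    import tempfile

    ini_text = "[postgresql]\nhost=localhost\nport=5432\nuser=demo\npassword=demo\n"
    with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as tmp:
        tmp.write(ini_text)
        path = tmp.name

    params = ConfigDbService.config(filename=path, section="postgresql")
    print(params)  # {'host': 'localhost', 'port': '5432', 'user': 'demo', 'password': 'demo'}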
| 28.862069
| 98
| 0.565114
|
9f8438d139776111a4ddd1ba76f218fa770b52b3
| 6,444
|
py
|
Python
|
endpoints/v2/__init__.py
|
jakedt/quay
|
424c1a19d744be444ed27aa1718fd74af311d863
|
[
"Apache-2.0"
] | 1
|
2020-10-16T19:30:41.000Z
|
2020-10-16T19:30:41.000Z
|
endpoints/v2/__init__.py
|
jakedt/quay
|
424c1a19d744be444ed27aa1718fd74af311d863
|
[
"Apache-2.0"
] | 15
|
2020-06-18T15:32:06.000Z
|
2022-03-03T23:06:24.000Z
|
endpoints/v2/__init__.py
|
jakedt/quay
|
424c1a19d744be444ed27aa1718fd74af311d863
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os.path
from functools import wraps
from urlparse import urlparse
from urllib import urlencode
from flask import Blueprint, make_response, url_for, request, jsonify
from semantic_version import Spec
import features
from app import app, get_app_url
from auth.auth_context import get_authenticated_context
from auth.permissions import (
ReadRepositoryPermission,
ModifyRepositoryPermission,
AdministerRepositoryPermission,
)
from auth.registry_jwt_auth import process_registry_jwt_auth, get_auth_headers
from data.registry_model import registry_model
from data.readreplica import ReadOnlyModeException
from endpoints.decorators import anon_protect, anon_allowed, route_show_if
from endpoints.v2.errors import (
V2RegistryException,
Unauthorized,
Unsupported,
NameUnknown,
ReadOnlyMode,
)
from util.http import abort
from util.metrics.prometheus import timed_blueprint
from util.registry.dockerver import docker_version
from util.pagination import encrypt_page_token, decrypt_page_token
logger = logging.getLogger(__name__)
v2_bp = timed_blueprint(Blueprint("v2", __name__))
@v2_bp.app_errorhandler(V2RegistryException)
def handle_registry_v2_exception(error):
response = jsonify({"errors": [error.as_dict()]})
response.status_code = error.http_status_code
if response.status_code == 401:
response.headers.extend(get_auth_headers(repository=error.repository, scopes=error.scopes))
logger.debug("sending response: %s", response.get_data())
return response
@v2_bp.app_errorhandler(ReadOnlyModeException)
def handle_readonly(ex):
error = ReadOnlyMode()
response = jsonify({"errors": [error.as_dict()]})
response.status_code = error.http_status_code
logger.debug("sending response: %s", response.get_data())
return response
_MAX_RESULTS_PER_PAGE = app.config.get("V2_PAGINATION_SIZE", 100)
def paginate(
start_id_kwarg_name="start_id",
limit_kwarg_name="limit",
callback_kwarg_name="pagination_callback",
):
"""
Decorates a handler adding a parsed pagination token and a callback to encode a response token.
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
requested_limit = int(request.args.get("n", _MAX_RESULTS_PER_PAGE))
except ValueError:
requested_limit = 0
limit = max(min(requested_limit, _MAX_RESULTS_PER_PAGE), 1)
next_page_token = request.args.get("next_page", request.args.get("last", None))
# Decrypt the next page token, if any.
start_id = None
page_info = decrypt_page_token(next_page_token)
if page_info is not None:
start_id = page_info.get("start_id", None)
def callback(results, response):
if len(results) <= limit:
return
next_page_token = encrypt_page_token({"start_id": max([obj.id for obj in results])})
link_url = os.path.join(
get_app_url(), url_for(request.endpoint, **request.view_args)
)
link_param = urlencode({"n": limit, "next_page": next_page_token})
link = '<%s?%s>; rel="next"' % (link_url, link_param)
response.headers["Link"] = link
kwargs[limit_kwarg_name] = limit
kwargs[start_id_kwarg_name] = start_id
kwargs[callback_kwarg_name] = callback
return func(*args, **kwargs)
return wrapped
return wrapper
def _require_repo_permission(permission_class, scopes=None, allow_public=False):
def wrapper(func):
@wraps(func)
def wrapped(namespace_name, repo_name, *args, **kwargs):
logger.debug(
"Checking permission %s for repo: %s/%s",
permission_class,
namespace_name,
repo_name,
)
permission = permission_class(namespace_name, repo_name)
if permission.can():
return func(namespace_name, repo_name, *args, **kwargs)
repository = namespace_name + "/" + repo_name
if allow_public:
repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None or not repository_ref.is_public:
raise Unauthorized(repository=repository, scopes=scopes)
if repository_ref.kind != "image":
msg = (
"This repository is for managing %s and not container images."
% repository_ref.kind
)
raise Unsupported(detail=msg)
if repository_ref.is_public:
if not features.ANONYMOUS_ACCESS:
raise Unauthorized(repository=repository, scopes=scopes)
return func(namespace_name, repo_name, *args, **kwargs)
raise Unauthorized(repository=repository, scopes=scopes)
return wrapped
return wrapper
require_repo_read = _require_repo_permission(
ReadRepositoryPermission, scopes=["pull"], allow_public=True
)
require_repo_write = _require_repo_permission(ModifyRepositoryPermission, scopes=["pull", "push"])
require_repo_admin = _require_repo_permission(
AdministerRepositoryPermission, scopes=["pull", "push"]
)
def get_input_stream(flask_request):
if flask_request.headers.get("transfer-encoding") == "chunked":
return flask_request.environ["wsgi.input"]
return flask_request.stream
@v2_bp.route("/")
@route_show_if(features.ADVERTISE_V2)
@process_registry_jwt_auth()
@anon_allowed
def v2_support_enabled():
docker_ver = docker_version(request.user_agent.string)
# Check if our version is one of the blacklisted versions, if we can't
# identify the version (None) we will fail open and assume that it is
# newer and therefore should not be blacklisted.
if docker_ver is not None and Spec(app.config["BLACKLIST_V2_SPEC"]).match(docker_ver):
abort(404)
response = make_response("true", 200)
if get_authenticated_context() is None:
response = make_response("true", 401)
response.headers.extend(get_auth_headers())
return response
from endpoints.v2 import (
blob,
catalog,
manifest,
tag,
v2auth,
)
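# --- Editor's note: hedged sketch; quay's real helpers live in util.pagination. ---
# A framework-free rendition of the pagination contract used by `paginate` above:
# clamp the requested page size, then report whether more results remain. The
# function and variable names here are illustrative only.
def paginate_plain(results, requested_limit, max_per_page=64):
    limit = max(min(requested_limit, max_per_page), 1)
    page = results[:limit]
    has_next = len(results) > limit
    next_start_id = page[-1] if has_next else None
    return page, has_next, next_start_id

# paginate_plain(list(range(10)), requested_limit=3)  -> ([0, 1, 2], True, 2)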
| 32.545455
| 100
| 0.671167
|
9fc87c3768bc8d83fb18735358d755b9425d1c24
| 2,788
|
py
|
Python
|
dnms/udp/client.py
|
jacksontj/dnms_prototype
|
cadc8dde0460ba5e1b3e7e77bc9f754c6363707a
|
[
"MIT"
] | null | null | null |
dnms/udp/client.py
|
jacksontj/dnms_prototype
|
cadc8dde0460ba5e1b3e7e77bc9f754c6363707a
|
[
"MIT"
] | null | null | null |
dnms/udp/client.py
|
jacksontj/dnms_prototype
|
cadc8dde0460ba5e1b3e7e77bc9f754c6363707a
|
[
"MIT"
] | null | null | null |
import tornado.iostream
import tornado.ioloop
import tornado.concurrent
import tornado
import time
import socket
import functools
import collections
class UDPRequest(object):
def __init__(self, addr, port, data, src_port=0):
self.addr = addr
self.port = port
self.data = data
self.src_port = src_port
class _UDPConnection(object):
def __init__(self, io_loop, request, release_callback, future, max_buffer_size):
self.start_time = time.time()
self.io_loop = io_loop
self.request = request
self.release_callback = release_callback
self.future = future
addrinfo = socket.getaddrinfo(
request.addr,
request.port,
socket.AF_INET,
socket.SOCK_DGRAM,
0,
0,
)
af, socktype, proto, canonname, sockaddr = addrinfo[0]
sock = socket.socket(af, socktype, proto)
if request.src_port:
sock.bind(('0.0.0.0', request.src_port))
        self.stream = tornado.iostream.IOStream(
            sock,
            io_loop=self.io_loop,
            max_buffer_size=max_buffer_size,  # use the value passed in rather than a hard-coded 2500
        )
        self.stream.connect(sockaddr, self._on_connect)
def _on_connect(self):
self.stream.write(self.request.data)
# TODO: buf size?
self.stream.read_bytes(1024, partial=True, callback=self._on_response)
def _on_response(self,data):
if self.release_callback is not None:
release_callback = self.release_callback
self.release_callback = None
release_callback()
if self.future:
self.future.set_result(data)
self.stream.close()
class AsyncUDPClient(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop or tornado.ioloop.IOLoop.instance()
self.max_clients = 10
self.queue = collections.deque()
self.active = {}
self.max_buffer_size = 2500
# TODO: timeout
def fetch(self, request, **kwargs):
future = tornado.concurrent.Future()
self.queue.append((request, future))
self._process_queue()
return future
def _process_queue(self):
with tornado.stack_context.NullContext():
while self.queue and len(self.active) < self.max_clients:
request, future = self.queue.popleft()
key = object()
self.active[key] = (request, future)
_UDPConnection(
self.io_loop,
request,
functools.partial(self._release_fetch,key),
future,
self.max_buffer_size,
)
def _release_fetch(self,key):
del self.active[key]
self._process_queue()
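# --- Editor's note: hedged, stdlib-only sketch of what _UDPConnection does. ---
# A blocking equivalent of the send-then-read-one-datagram flow above; the
# 127.0.0.1:9999 target is a placeholder, not a real service.
def blocking_udp_roundtrip(addr="127.0.0.1", port=9999, payload=b"ping", timeout=2.0):
    import socket as _socket
    sock = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.sendto(payload, (addr, port))
        data, _ = sock.recvfrom(1024)  # read a single response datagram
        return data
    finally:
        sock.close()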
| 29.659574
| 84
| 0.596844
|
e53de675a692ed487a7f94a58102ec995ed4bee6
| 1,151
|
py
|
Python
|
services/consuming_services_apis/setup.py
|
OblongCheese/ConsumingServicesWithPython
|
a8057720f110c13a5f1e1256e4cc5d5f14386d15
|
[
"MIT"
] | null | null | null |
services/consuming_services_apis/setup.py
|
OblongCheese/ConsumingServicesWithPython
|
a8057720f110c13a5f1e1256e4cc5d5f14386d15
|
[
"MIT"
] | null | null | null |
services/consuming_services_apis/setup.py
|
OblongCheese/ConsumingServicesWithPython
|
a8057720f110c13a5f1e1256e4cc5d5f14386d15
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_chameleon',
'waitress',
]
tests_require = [
'WebTest >= 1.3.1', # py3 compat
'pytest',
'pytest-cov',
]
setup(
name='consuming_services_apis',
version='0.0',
description='consuming_services_apis',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Programming Language :: Python',
'Framework :: Pyramid',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
extras_require={
'testing': tests_require,
},
install_requires=requires,
entry_points={
'paste.app_factory': [
'main = consuming_services_apis:main',
],
},
)
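# --- Editor's note: hedged sketch, not part of the original setup.py. ---
# After `pip install -e .`, the 'paste.app_factory' entry point declared above can
# be resolved at runtime; the importlib.metadata group= keyword assumes Python 3.10+.
def load_app_factory():
    from importlib.metadata import entry_points
    for ep in entry_points(group="paste.app_factory"):
        if ep.name == "main":
            return ep.load()  # -> consuming_services_apis.main
    return None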
| 22.568627
| 63
| 0.604692
|
e40afcaefa6203b3b7103ce01c3e4298d440fd88
| 890
|
py
|
Python
|
simulator_runner.py
|
MikaelBertze/HallonDisp
|
b83ef5d3550ad6e6debab69ae65e73efccf6fa74
|
[
"MIT"
] | null | null | null |
simulator_runner.py
|
MikaelBertze/HallonDisp
|
b83ef5d3550ad6e6debab69ae65e73efccf6fa74
|
[
"MIT"
] | null | null | null |
simulator_runner.py
|
MikaelBertze/HallonDisp
|
b83ef5d3550ad6e6debab69ae65e73efccf6fa74
|
[
"MIT"
] | null | null | null |
from consolemenu import *
from consolemenu.items import *
from hallondisp.utils.iot_simulators import *
broker = "bulbasaur.bertze.se"
powerSimulator = PowerSensorSimulator(broker, .1, 5, 200)
#powerSimulator.connect()
powerSimulator.run()
tempSimulator = TempSimulator(broker, -10, 30, 200, 5)
#tempSimulator.connect()
tempSimulator.run()
doorSimulator = DoorSimulator(broker, "door1")
#doorSimulator.connect()
doorSimulator.run()
def main():
menu = ConsoleMenu("Title", "Subtitle")
for sim in [powerSimulator, tempSimulator, doorSimulator]:
function_item = FunctionItem(f"Toggle pause for {sim.name}", sim.toggle_pause)
menu.append_item(function_item)
doorState = "Open" if doorSimulator.door_state else "Close"
menu.append_item(FunctionItem(f"{doorState} door", doorSimulator.toggle_door))
menu.show()
while True:
input()
main()
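# --- Editor's note: hedged sketch, not part of the runner. ---
# The simulators above are assumed to expose name/toggle_pause/run; a minimal stub
# with that shape is handy for trying the menu without an MQTT broker.
class StubSimulator:
    def __init__(self, name):
        self.name = name
        self.paused = False

    def toggle_pause(self):
        self.paused = not self.paused
        print(f"{self.name} paused={self.paused}")

    def run(self):
        pass  # a real simulator would start publishing readings here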
| 23.421053
| 86
| 0.732584
|
9b0869c88320ec6ec40c3f3da67135dddfe31579
| 193
|
py
|
Python
|
fibo.py
|
ATrain951/01.python_function-milaan9
|
0e776b98dd6349efe2789ded1d54ccb453325414
|
[
"MIT"
] | 167
|
2021-06-28T03:50:28.000Z
|
2022-03-21T14:56:29.000Z
|
fibo.py
|
olivia0000/04_Python_Functions
|
0e776b98dd6349efe2789ded1d54ccb453325414
|
[
"MIT"
] | null | null | null |
fibo.py
|
olivia0000/04_Python_Functions
|
0e776b98dd6349efe2789ded1d54ccb453325414
|
[
"MIT"
] | 155
|
2021-06-28T03:55:09.000Z
|
2022-03-21T14:56:30.000Z
|
# Fibonacci numbers module
def fib(n): # return Fibonacci series up to n
result = []
a, b = 0, 1
while b < n:
result.append(b)
a, b = b, a + b
return result
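# --- Editor's note: hedged usage example, not part of the original module. ---
# Typical interactive use:
#   >>> import fibo
#   >>> fibo.fib(10)
#   [1, 1, 2, 3, 5, 8]
if __name__ == "__main__":
    print(fib(10))  # [1, 1, 2, 3, 5, 8]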
| 14.846154
| 45
| 0.528497
|
c29be88a7e88f97b66742f48dc659e6d9297764d
| 6,663
|
py
|
Python
|
mirror/App.py
|
FrancescoSaverioZuppichini/mirror
|
db38362a8ce300bab04d86f2659d6d9bf516c850
|
[
"MIT"
] | 234
|
2018-12-02T12:53:53.000Z
|
2022-02-04T22:11:18.000Z
|
mirror/App.py
|
garain/mirror
|
a099f8eb87ec6517463123e514acaf63741136ea
|
[
"MIT"
] | 12
|
2019-01-06T17:38:34.000Z
|
2022-02-27T21:35:23.000Z
|
mirror/App.py
|
garain/mirror
|
a099f8eb87ec6517463123e514acaf63741136ea
|
[
"MIT"
] | 23
|
2018-12-17T13:44:15.000Z
|
2021-07-06T17:58:54.000Z
|
import json
import io
import torch
import time
from flask import Flask, request, Response, send_file, jsonify
from torchvision.transforms import ToPILImage
from .visualisations.web import Weights
from .ModuleTracer import ModuleTracer
class App(Flask):
default_visualisations = [Weights]
MAX_LINKS_EVERY_REQUEST = 64
def __init__(self, inputs, model, visualisations=[]):
super().__init__(__name__)
self.cache = {} # internal cache used to store the results
self.outputs = None # holds the current output from a visualisation
if len(inputs) <= 0: raise ValueError('At least one input is required.')
self.inputs, self.model = inputs, model
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.current_input = self.inputs[0].unsqueeze(0).to(self.device) # add 1 dim for batch
self.module = model.to(self.device).eval()
self.setup_tracer()
self.setup_visualisations(visualisations)
@self.route('/')
def root():
return self.send_static_file('index.html')
@self.route('/api/model', methods=['GET'])
def api_model():
model = self.tracer.to_JSON()
response = jsonify(model)
return response
@self.route('/api/inputs', methods=['GET', 'PUT'])
def api_inputs():
if request.method == 'GET':
self.outputs = self.inputs
                # Build placeholder links: the image route only consumes the trailing
                # output index, so constant hash(None) segments stand in for the rest.
                # (The bare built-in `id` originally used here produced malformed URLs.)
                response = ['/api/model/image/{}/{}/{}/{}/{}'.format(
                    hash(None), hash(None), hash(time.time()), hash(None), i)
                    for i in range(len(self.inputs))]
response = jsonify({'links': response, 'next': False})
elif request.method == 'PUT':
data = json.loads(request.data.decode())
input_index = data['id']
self.current_input = self.inputs[input_index].unsqueeze(0).to(self.device)
response = jsonify(data)
return response
@self.route('/api/model/layer/<id>')
def api_model_layer(id):
id = int(id)
name = self.traced[id].name
return Response(response=name)
@self.route('/api/visualisation', methods=['GET'])
def api_visualisations():
serialised = [v.to_JSON() for v in self.visualisations]
response = jsonify({'visualisations': serialised,
'current': self.current_vis.to_JSON()})
return response
@self.route('/api/visualisation', methods=['PUT'])
def api_visualisation():
data = json.loads(request.data.decode())
vis_key = data['name']
visualisations_not_exist = vis_key not in self.name2visualisations
if visualisations_not_exist:
response = Response(status=500,
response='Visualisation {} not supported or does not exist'.format(vis_key))
else:
self.current_vis = self.name2visualisations[vis_key]
self.current_vis.from_JSON(data['params'])
self.current_vis.clean_cache()
response = jsonify(self.current_vis.to_JSON())
return response
@self.route('/api/model/layer/output/<id>')
def api_model_layer_output(id):
try:
layer = self.traced[id].module
if self.current_input not in self.current_vis.cache: self.current_vis.cache[self.current_input] = {}
layer_cache = self.current_vis.cache[self.current_input]
                # always clone the input so the original tensor is not modified
input_clone = self.current_input.clone()
if layer not in layer_cache:
layer_cache[layer] = self.current_vis(input_clone, layer)
del input_clone
else:
print('[INFO] cached')
self.outputs, _ = layer_cache[layer]
if len(self.outputs.shape) < 3: raise ValueError
last = int(request.args['last'])
max = min((last + self.MAX_LINKS_EVERY_REQUEST), self.outputs.shape[0])
response = ['/api/model/image/{}/{}/{}/{}/{}'.format(hash(self.current_input),
hash(self.current_vis),
hash(time.time()),
id,
i) for i in range(last, max)]
response = jsonify({'links': response, 'next': last + 1 < max})
except KeyError:
response = Response(status=500, response='Index not found.')
except ValueError:
response = Response(status=404, response='Outputs must be an array of images')
except StopIteration:
response = jsonify({'links': [], 'next': False})
return response
@self.route('/api/model/image/<input_id>/<vis_id>/<layer_id>/<time>/<output_id>')
def api_model_layer_output_image(input_id, vis_id, layer_id, time, output_id):
output_id = int(output_id)
try:
output = self.outputs[output_id]
output = output.detach().cpu()
pil_img = ToPILImage()(output)
img_io = io.BytesIO()
pil_img.save(img_io, 'JPEG', quality=70)
img_io.seek(0)
return send_file(img_io, mimetype='image/jpeg')
except KeyError:
return Response(status=500, response='Index not found.')
def setup_tracer(self):
# instantiate a Tracer object and trace one input
self.tracer = ModuleTracer(module=self.module)
self.tracer(self.current_input)
# store the traced graph as a dictionary
self.traced = self.tracer.__dict__()
def setup_visualisations(self, visualisations):
visualisations = [*self.default_visualisations, *visualisations]
self.visualisations = [v(self.module, self.device) for v in visualisations]
self.name2visualisations = {v.name: v for v in self.visualisations}
self.current_vis = self.visualisations[0]
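# --- Editor's note: hedged, self-contained sketch of the image-serving path. ---
# api_model_layer_output_image above turns a (C, H, W) float tensor into JPEG bytes;
# the same conversion in isolation looks like this.
def tensor_to_jpeg_bytes(tensor):
    pil_img = ToPILImage()(tensor.detach().cpu().clamp(0, 1))  # expects values in [0, 1]
    buf = io.BytesIO()
    pil_img.save(buf, 'JPEG', quality=70)
    return buf.getvalue()

# tensor_to_jpeg_bytes(torch.rand(3, 32, 32))  -> b'\xff\xd8\xff...'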
| 39.660714
| 116
| 0.538496
|
b77beaa9661c0ea030f8b68c2a59be0755fc6cab
| 151
|
py
|
Python
|
test/test_widgets/__init__.py
|
SunChuquin/pyqode.core
|
edf29204446e3679701e74343288cf692eb07d86
|
[
"MIT"
] | 23
|
2015-01-08T15:04:47.000Z
|
2022-03-08T07:47:08.000Z
|
test/test_widgets/__init__.py
|
SunChuquin/pyqode.core
|
edf29204446e3679701e74343288cf692eb07d86
|
[
"MIT"
] | 16
|
2021-02-01T08:54:08.000Z
|
2022-01-09T10:23:57.000Z
|
test/test_widgets/__init__.py
|
SunChuquin/pyqode.core
|
edf29204446e3679701e74343288cf692eb07d86
|
[
"MIT"
] | 24
|
2015-01-09T14:16:41.000Z
|
2021-12-06T15:11:22.000Z
|
"""
Those tests are very basic, we just check if the widget can be instantiated
without error (to make sure they work for the supported environments).
"""
| 30.2
| 75
| 0.768212
|
c31bc096977a1f5c2aaf1b58c1c0515700a36bae
| 3,191
|
py
|
Python
|
legacy/legacy/recommenders/cdl.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | null | null | null |
legacy/legacy/recommenders/cdl.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | 6
|
2020-01-28T22:51:16.000Z
|
2022-02-10T00:11:19.000Z
|
legacy/legacy/recommenders/cdl.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | null | null | null |
from openrec.legacy.recommenders import PMF
from openrec.legacy.modules.extractions import SDAE
from openrec.legacy.modules.fusions import Average
class CDL(PMF):
def __init__(
self,
batch_size,
max_user,
max_item,
dim_embed,
item_f,
dims,
dropout=None,
test_batch_size=None,
item_serving_size=None,
l2_reg=None,
l2_reg_mlp=None,
l2_reconst=None,
opt="SGD",
sess_config=None,
):
self._item_f = item_f
self._dims = dims
self._dropout = dropout
self._l2_reg_mlp = l2_reg_mlp
self._l2_reconst = l2_reconst
super(CDL, self).__init__(
batch_size=batch_size,
max_user=max_user,
max_item=max_item,
dim_embed=dim_embed,
l2_reg=l2_reg,
test_batch_size=test_batch_size,
opt=opt,
sess_config=sess_config,
)
def _build_item_inputs(self, train=True):
super(CDL, self)._build_item_inputs(train)
if train:
self._add_input(
name="item_feature",
dtype="float32",
shape=[self._batch_size, self._item_f.shape[1]],
)
else:
self._add_input(name="item_id", dtype="int32", shape=[None], train=False)
self._add_input(
name="item_feature",
dtype="float32",
shape=[None, self._item_f.shape[1]],
train=False,
)
def _input_mappings(self, batch_data, train):
default_input_map = super(CDL, self)._input_mappings(
batch_data=batch_data, train=train
)
if train:
default_input_map[self._get_input("item_feature")] = self._item_f[
batch_data["item_id_input"]
]
else:
default_input_map[self._get_input("item_id", train=False)] = batch_data[
"item_id_input"
]
default_input_map[
self._get_input("item_feature", train=False)
] = self._item_f[batch_data["item_id_input"]]
return default_input_map
def _build_item_extractions(self, train=True):
super(CDL, self)._build_item_extractions(train)
self._add_module(
"item_f",
SDAE(
in_tensor=self._get_input("item_feature", train=train),
dims=self._dims,
l2_reg=self._l2_reg_mlp,
l2_reconst=self._l2_reconst,
dropout=self._dropout,
scope="AutoEncoder",
reuse=False,
),
train=train,
)
def _build_default_fusions(self, train=True):
self._add_module(
"item_vec",
Average(
scope="item_average",
reuse=not train,
module_list=[
self._get_module("item_vec", train=train),
self._get_module("item_f", train=train),
],
weight=2.0,
),
train=train,
)
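# --- Editor's note: hedged construction sketch, not from the openrec repo. ---
# The hyper-parameters and the random item-feature matrix below are placeholders;
# only the constructor signature defined above is relied upon.
if __name__ == "__main__":
    import numpy as np

    item_features = np.random.rand(500, 300).astype("float32")  # max_item x feature dim
    model = CDL(
        batch_size=128,
        max_user=1000,
        max_item=500,
        dim_embed=50,
        item_f=item_features,
        dims=[200, 50],
        dropout=0.1,
        l2_reg=0.001,
    )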
| 28.747748
| 85
| 0.530241
|
b218444d59a36e45b2febe2614451ec35e689484
| 945
|
py
|
Python
|
kubernetes/test/test_v1_node_selector.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_node_selector.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_node_selector.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_node_selector import V1NodeSelector
class TestV1NodeSelector(unittest.TestCase):
""" V1NodeSelector unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1NodeSelector(self):
"""
Test V1NodeSelector
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_node_selector.V1NodeSelector()
pass
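    # --- Editor's note: hedged sketch addressing the FIXME above, not generated code. ---
    # Assumes the V1NodeSelectorTerm / V1NodeSelectorRequirement models that ship
    # alongside V1NodeSelector in this client; the key and values are example data.
    def build_example_node_selector(self):
        requirement = kubernetes.client.V1NodeSelectorRequirement(
            key="kubernetes.io/hostname", operator="In", values=["node-1"]
        )
        term = kubernetes.client.V1NodeSelectorTerm(match_expressions=[requirement])
        return kubernetes.client.V1NodeSelector(node_selector_terms=[term])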
if __name__ == '__main__':
unittest.main()
| 21
| 105
| 0.703704
|
62b9b6bdcb2c17b12dbdc14a7aab8bea12efa2c9
| 210
|
py
|
Python
|
docs/examples/led_board_4.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | 743
|
2019-07-31T02:57:08.000Z
|
2022-03-31T08:48:30.000Z
|
docs/examples/led_board_4.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | 259
|
2019-07-29T14:26:40.000Z
|
2022-03-27T00:17:56.000Z
|
docs/examples/led_board_4.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | 127
|
2019-08-03T19:30:18.000Z
|
2022-02-24T14:33:50.000Z
|
from gpiozero import LEDBoard
from time import sleep
leds = LEDBoard(2, 3, 4, 5, 6, 7, 8, 9)
leds[0].on() # first led on
sleep(1)
leds[7].on() # last led on
sleep(1)
leds[-1].off() # last led off
sleep(1)
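# --- Editor's note: hedged addition, not part of the original gpiozero example. ---
# The board can also be driven as one composite device; like the lines above this
# needs a Raspberry Pi (or a mock pin factory) to actually run.
leds.on()      # all eight LEDs on
sleep(1)
leds.off()     # all eight LEDs off
sleep(1)
leds.blink(on_time=0.5, off_time=0.5, n=3, background=False)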
| 17.5
| 39
| 0.638095
|
a6a5a3231736b8a21108ffd952ba04aae9555026
| 23,457
|
py
|
Python
|
scripts/config_apicv10.py
|
jesusmah/apic-config-pipeline
|
2ac53be31e7cf7a68cac0a2f22ab3408faac434d
|
[
"Apache-2.0"
] | null | null | null |
scripts/config_apicv10.py
|
jesusmah/apic-config-pipeline
|
2ac53be31e7cf7a68cac0a2f22ab3408faac434d
|
[
"Apache-2.0"
] | 1
|
2022-03-15T08:30:05.000Z
|
2022-03-15T08:30:05.000Z
|
scripts/config_apicv10.py
|
jesusmah/apic-config-pipeline
|
2ac53be31e7cf7a68cac0a2f22ab3408faac434d
|
[
"Apache-2.0"
] | 6
|
2021-11-03T16:56:15.000Z
|
2022-03-11T18:46:20.000Z
|
import os, json
import utils
import api_calls
"""
API Connect v10 post install configuration steps --> https://www.ibm.com/docs/en/api-connect/10.0.x?topic=environment-cloud-manager-configuration-checklist
"""
FILE_NAME = "config_apicv10.py"
DEBUG = os.getenv('DEBUG','')
# This is the default out of the box catalog that gets created when a Provider Organization is created.
catalog_name = "sandbox"
def info(step):
return "[INFO]["+ FILE_NAME +"][STEP " + str(step) + "] - "
try:
######################################################################################
# Step 1 - Get the IBM API Connect Toolkit credentials and environment configuration #
######################################################################################
print(info(1) + "######################################################################################")
print(info(1) + "# Step 1 - Get the IBM API Connect Toolkit credentials and environment configuration #")
print(info(1) + "######################################################################################")
toolkit_credentials = utils.get_toolkit_credentials(os.environ["CONFIG_FILES_DIR"])
environment_config = utils.get_env_config(os.environ["CONFIG_FILES_DIR"])
if DEBUG:
print(info(1) + "These are the IBM API Connect Toolkit Credentials")
print(info(1) + "-------------------------------------------------")
print(info(1), json.dumps(toolkit_credentials, indent=4, sort_keys=False))
print(info(1) + "These is the environment configuration")
print(info(1) + "--------------------------------------")
print(info(1), json.dumps(environment_config, indent=4, sort_keys=False))
##################################################################
# Step 2 - Get the IBM API Connect Cloud Management Bearer Token #
##################################################################
print(info(2) + "##################################################################")
print(info(2) + "# Step 2 - Get the IBM API Connect Cloud Management Bearer Token #")
print(info(2) + "##################################################################")
admin_bearer_token = api_calls.get_bearer_token(environment_config["APIC_ADMIN_URL"],
"admin",
environment_config["APIC_ADMIN_PASSWORD"],
"admin/default-idp-1",
toolkit_credentials["toolkit"]["client_id"],
toolkit_credentials["toolkit"]["client_secret"])
if DEBUG:
print(info(2) + "This is the Bearer Token to work against the IBM API Connect Cloud Management endpoints")
print(info(2) + "--------------------------------------------------------------------------------------")
print(info(2), admin_bearer_token)
#################################
# Step 3 - Get the Admin org ID #
#################################
print(info(3) + "#################################")
print(info(3) + "# Step 3 - Get the Admin org ID #")
print(info(3) + "#################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/cloud/orgs'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
found = False
admin_org_id = ''
if response.status_code != 200:
raise Exception("Return code for getting the Admin org ID isn't 200. It is " + str(response.status_code))
for org in response.json()['results']:
if org['org_type'] == "admin":
found = True
admin_org_id = org['id']
if not found:
raise Exception("[ERROR] - The Admin Organization was not found in the IBM API Connect Cluster instance")
if DEBUG:
print(info(3) + "Admin Org ID: " + admin_org_id)
####################################
# Step 4 - Create the Email Server #
####################################
print(info(4) + "####################################")
print(info(4) + "# Step 4 - Create the Email Server #")
print(info(4) + "####################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/mail-servers'
# Create the data object
data = {}
data['title'] = 'Default Email Server'
data['name'] = 'default-email-server'
data['host'] = os.environ['EMAIL_HOST']
data['port'] = int(os.environ['EMAIL_PORT'])
credentials = {}
credentials['username'] = os.environ['EMAIL_USERNAME']
credentials['password'] = os.environ['EMAIL_PASSWORD']
data['credentials'] = credentials
data['tls_client_profile_url'] = None
data['secure'] = False
if DEBUG:
print(info(4) + "This is the data object:")
print(info(4), data)
print(info(4) + "This is the JSON dump:")
print(info(4), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for creating the Email Server isn't 201. It is " + str(response.status_code))
email_server_url = response.json()['url']
if DEBUG:
print(info(4) + "Email Server url: " + email_server_url)
##################################################
# Step 5 - Sender and Email Server Configuration #
##################################################
print(info(5) + "##################################################")
print(info(5) + "# Step 5 - Sender and Email Server Configuration #")
print(info(5) + "##################################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/cloud/settings'
# Create the data object
# Ideally this would also be loaded from a sealed secret
data = {}
data['mail_server_url'] = email_server_url
email_sender = {}
email_sender['name'] = 'APIC Administrator'
email_sender['address'] = 'test@test.com'
data['email_sender'] = email_sender
if DEBUG:
print(info(5) + "This is the data object:")
print(info(5), data)
print(info(5) + "This is the JSON dump:")
print(info(5), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'put', data)
if response.status_code != 200:
raise Exception("Return code for Sender and Email Server configuration isn't 200. It is " + str(response.status_code))
#################################################
# Step 6 - Register the Default Gateway Service #
#################################################
print(info(6) + "#################################################")
print(info(6) + "# Step 6 - Register the Default Gateway Service #")
print(info(6) + "#################################################")
# First, we need to get the Datapower API Gateway instances details
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/cloud/integrations/gateway-service/datapower-api-gateway'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
if response.status_code != 200:
raise Exception("Return code for getting the Datapower API Gateway instances details isn't 200. It is " + str(response.status_code))
datapower_api_gateway_url = response.json()['url']
if DEBUG:
print(info(6) + "Email Server url: " + datapower_api_gateway_url)
# Second, we need to get the TLS server profiles
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/tls-server-profiles'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
found = False
tls_server_profile_url = ''
if response.status_code != 200:
raise Exception("Return code for getting the TLS server profiles isn't 200. It is " + str(response.status_code))
for profile in response.json()['results']:
if profile['name'] == "tls-server-profile-default":
found = True
tls_server_profile_url = profile['url']
if not found:
raise Exception("[ERROR] - The default TLS server profile was not found in the IBM API Connect Cluster instance")
if DEBUG:
print(info(6) + "Default TLS server profile url: " + tls_server_profile_url)
# Third, we need to get the TLS client profiles
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/tls-client-profiles'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
found = False
tls_client_profile_url = ''
if response.status_code != 200:
raise Exception("Return code for getting the TLS client profiles isn't 200. It is " + str(response.status_code))
for profile in response.json()['results']:
if profile['name'] == "gateway-management-client-default":
found = True
tls_client_profile_url = profile['url']
if not found:
raise Exception("[ERROR] - The Gateway Management TLS client profile was not found in the IBM API Connect Cluster instance")
if DEBUG:
print(info(6) + "Gateway Management TLS server profile url: " + tls_client_profile_url)
# Finally, we can actually make the REST call to get the Default Gateway Service registered
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/availability-zones/availability-zone-default/gateway-services'
# Create the data object
data = {}
data['name'] = "default-gateway-service"
data['title'] = "Default Gateway Service"
data['summary'] = "Default Gateway Service that comes out of the box with API Connect Cluster v10"
data['endpoint'] = 'https://' + environment_config["APIC_GATEWAY_MANAGER_URL"]
data['api_endpoint_base'] = 'https://' + environment_config["APIC_GATEWAY_URL"]
data['tls_client_profile_url'] = tls_client_profile_url
data['gateway_service_type'] = 'datapower-api-gateway'
visibility = {}
visibility['type'] = 'public'
data['visibility'] = visibility
sni = []
sni_inner={}
sni_inner['host'] = '*'
sni_inner['tls_server_profile_url'] = tls_server_profile_url
sni.append(sni_inner)
data['sni'] = sni
data['integration_url'] = datapower_api_gateway_url
if DEBUG:
print(info(6) + "This is the data object:")
print(info(6), data)
print(info(6) + "This is the JSON dump:")
print(info(6), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for registering the Default Gateway Service isn't 201. It is " + str(response.status_code))
# This will be needed in the last step when we associate this Gateway Service to the Sandbox catalog
gateway_service_id = response.json()['id']
if DEBUG:
print(info(6) + "Default Gateway Service ID: " + gateway_service_id)
###################################################
# Step 7 - Register the Default Analytics Service #
###################################################
print(info(7) + "###################################################")
print(info(7) + "# Step 7 - Register the Default Analytics Service #")
print(info(7) + "###################################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/availability-zones/availability-zone-default/analytics-services'
# Create the data object
data = {}
data['name'] = "default-analytics-service"
data['title'] = "Default Analytics Service"
data['summary'] = "Default Analytics Service that comes out of the box with API Connect Cluster v10"
data['endpoint'] = 'https://' + environment_config["APIC_ANALYTICS_CONSOLE_URL"]
if DEBUG:
print(info(7) + "This is the data object:")
print(info(7), data)
print(info(7) + "This is the JSON dump:")
print(info(7), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for registering the Default Analytics Service isn't 201. It is " + str(response.status_code))
analytics_service_url = response.json()['url']
if DEBUG:
print(info(6) + "Default Analytics Service url: " + analytics_service_url)
#############################################################################
# Step 8 - Associate Default Analytics Service with Default Gateway Service #
#############################################################################
print(info(8) + "#############################################################################")
print(info(8) + "# Step 8 - Associate Default Analytics Service with Default Gateway Service #")
print(info(8) + "#############################################################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/availability-zones/availability-zone-default/gateway-services/default-gateway-service'
# Create the data object
data = {}
data['analytics_service_url'] = analytics_service_url
if DEBUG:
print(info(8) + "This is the data object:")
print(info(8), data)
print(info(8) + "This is the JSON dump:")
print(info(8), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'patch', data)
if response.status_code != 200:
raise Exception("Return code for associating the Default Analytics Service with the Default Gateway Service isn't 200. It is " + str(response.status_code))
################################################
# Step 9 - Register the Default Portal Service #
################################################
print(info(9) + "################################################")
print(info(9) + "# Step 9 - Register the Default Portal Service #")
print(info(9) + "################################################")
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/orgs/' + admin_org_id + '/availability-zones/availability-zone-default/portal-services'
# Create the data object
data = {}
data['title'] = "Default Portal Service"
data['name'] = "default-portal-service"
data['summary'] = "Default Portal Service that comes out of the box with API Connect Cluster v10"
data['endpoint'] = 'https://' + environment_config["APIC_PORTAL_DIRECTOR_URL"]
data['web_endpoint_base'] = 'https://' + environment_config["APIC_PORTAL_WEB_URL"]
visibility = {}
visibility['group_urls'] = None
visibility['org_urls'] = None
visibility['type'] = 'public'
data['visibility'] = visibility
if DEBUG:
print(info(9) + "This is the data object:")
print(info(9), data)
print(info(9) + "This is the JSON dump:")
print(info(9), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for registering the Default Portal Service isn't 201. It is " + str(response.status_code))
############################################
# Step 10 - Create a Provider Organization #
############################################
print(info(10) + "############################################")
print(info(10) + "# Step 10 - Create a Provider Organization #")
print(info(10) + "############################################")
# First, we need to get the user registries so that we can create a new user who will be the Provider Organization Owner
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/cloud/settings/user-registries'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
if response.status_code != 200:
raise Exception("Return code for retrieving the user registries isn't 200. It is " + str(response.status_code))
provider_user_registry_default_url = response.json()['provider_user_registry_default_url']
if DEBUG:
print(info(10) + "Default Provider User Registry url: " + provider_user_registry_default_url)
# Then, we need to register the user that will be the Provider Organization owner
url = provider_user_registry_default_url + '/users'
# Create the data object
# Ideally this should be loaded from a sealed secret.
# Using defaults for now.
data = {}
data['username'] = os.environ["PROV_ORG_OWNER_USERNAME"]
data['email'] = os.environ["PROV_ORG_OWNER_EMAIL"]
data['first_name'] = os.environ["PROV_ORG_OWNER_FIRST_NAME"]
data['last_name'] = os.environ["PROV_ORG_OWNER_LAST_NAME"]
data['password'] = os.environ["PROV_ORG_OWNER_PASSWORD"]
if DEBUG:
print(info(10) + "This is the data object:")
print(info(10), data)
print(info(10) + "This is the JSON dump:")
print(info(10), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for registering the provider organization owner user isn't 201. It is " + str(response.status_code))
owner_url = response.json()['url']
if DEBUG:
print(info(10) + "Provider Organization Owner url: " + owner_url)
# Finally, we can create the Provider Organization with the previous owner
url = 'https://' + environment_config["APIC_ADMIN_URL"] + '/api/cloud/orgs'
# Compute the name of the Provider Organization from the title
po_name=os.environ["PROV_ORG_TITLE"].strip().replace(" ","-")
# Create the data object
# Ideally this should be loaded from a sealed secret.
# Using defaults for now.
data = {}
data['title'] = os.environ["PROV_ORG_TITLE"]
data['name'] = po_name.lower()
data['owner_url'] = owner_url
if DEBUG:
print(info(10) + "This is the data object:")
print(info(10), data)
print(info(10) + "This is the JSON dump:")
print(info(10), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for creating the provider organization isn't 201. It is " + str(response.status_code))
###############################################################
# Step 11 - Get the IBM API Connect Provider API Bearer Token #
###############################################################
print(info(11) + "###############################################################")
print(info(11) + "# Step 11 - Get the IBM API Connect Provider API Bearer Token #")
print(info(11) + "###############################################################")
    # Ideally, the username and password for getting the Bearer Token below would come from a sealed secret (that would also be used
# in the previous step 10 when registering the new user for the provider organization owner)
# Using defaults for now.
admin_bearer_token = api_calls.get_bearer_token(environment_config["APIC_API_MANAGER_URL"],
os.environ["PROV_ORG_OWNER_USERNAME"],
os.environ["PROV_ORG_OWNER_PASSWORD"],
"provider/default-idp-2",
toolkit_credentials["toolkit"]["client_id"],
toolkit_credentials["toolkit"]["client_secret"])
if DEBUG:
print(info(11) + "This is the Bearer Token to work against the IBM API Connect API Management endpoints")
print(info(11) + "-------------------------------------------------------------------------------------")
print(info(11), admin_bearer_token)
#########################################################################
# Step 12 - Associate Default Gateway Services with the Sandbox catalog #
#########################################################################
print(info(12) + "#########################################################################")
print(info(12) + "# Step 12 - Associate Default Gateway Services with the Sandbox catalog #")
print(info(12) + "#########################################################################")
# First, we need to get the organization ID
url = 'https://' + environment_config["APIC_API_MANAGER_URL"] + '/api/orgs'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
found = False
provider_org_id = ''
if response.status_code != 200:
raise Exception("Return code for getting the Provider Org ID isn't 200. It is " + str(response.status_code))
for org in response.json()['results']:
if org['org_type'] == "provider":
found = True
provider_org_id = org['id']
if not found:
raise Exception("[ERROR] - The Provider Organization was not found in the IBM API Connect Cluster instance")
if DEBUG:
print(info(12) + "Provider Org ID: " + provider_org_id)
# Then, we need to get the Sandbox catalog ID
url = 'https://' + environment_config["APIC_API_MANAGER_URL"] + '/api/orgs/' + provider_org_id + '/catalogs'
response = api_calls.make_api_call(url, admin_bearer_token, 'get')
found = False
catalog_id = ''
if response.status_code != 200:
raise Exception("Return code for getting the Sandbox catalog ID isn't 200. It is " + str(response.status_code))
for catalog in response.json()['results']:
if catalog['name'] == catalog_name:
found = True
catalog_id = catalog['id']
if not found:
raise Exception("[ERROR] - The Sandbox catalog was not found in the IBM API Connect Cluster instance")
if DEBUG:
print(info(12) + "Sandbox catalog ID: " + catalog_id)
# Finally, we can associate the Default Gateway Service to the Sandbox catalog
url = 'https://' + environment_config["APIC_API_MANAGER_URL"] + '/api/catalogs/' + provider_org_id + '/' + catalog_id + '/configured-gateway-services'
# Create the data object
# Ideally this could also be loaded from a sealed secret.
# Using defaults for now.
gateway_service_url = 'https://' + environment_config["APIC_API_MANAGER_URL"] + '/api/orgs/' + provider_org_id + '/gateway-services/' + gateway_service_id
data = {}
data['gateway_service_url'] = gateway_service_url
if DEBUG:
print(info(12) + "This is the data object:")
print(info(12), data)
print(info(12) + "This is the JSON dump:")
print(info(12), json.dumps(data))
response = api_calls.make_api_call(url, admin_bearer_token, 'post', data)
if response.status_code != 201:
raise Exception("Return code for associating the Default Gateway Service to the Sandbox catalog isn't 201. It is " + str(response.status_code))
#######
# END #
#######
print("#######")
print("# END #")
print("#######")
except Exception as e:
raise Exception("[ERROR] - Exception in " + FILE_NAME + ": " + repr(e))
| 45.283784
| 180
| 0.564949
|
be9a1434c3fd8d3ee8f0e184a30beb28aae074fc
| 3,783
|
py
|
Python
|
tests/www/api/experimental/test_kerberos_endpoints.py
|
ayushSethi22/airflow
|
d8c473e9119286f5fdb769880134c76f40bf42f6
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/www/api/experimental/test_kerberos_endpoints.py
|
ayushSethi22/airflow
|
d8c473e9119286f5fdb769880134c76f40bf42f6
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 22
|
2019-12-09T23:22:07.000Z
|
2021-05-12T23:15:40.000Z
|
tests/www/api/experimental/test_kerberos_endpoints.py
|
ayushSethi22/airflow
|
d8c473e9119286f5fdb769880134c76f40bf42f6
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5
|
2019-11-18T13:19:29.000Z
|
2020-03-25T13:20:29.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import mock
import os
import unittest
from datetime import datetime
from airflow import configuration
from airflow.api.auth.backend.kerberos_auth import CLIENT_AUTH
from airflow.utils.net import get_hostname
from airflow.www import app as application
@unittest.skipIf('KRB5_KTNAME' not in os.environ,
'Skipping Kerberos API tests due to missing KRB5_KTNAME')
class ApiKerberosTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
try:
configuration.conf.add_section("api")
except Exception:
pass
configuration.conf.set("api",
"auth_backend",
"airflow.api.auth.backend.kerberos_auth")
try:
configuration.conf.add_section("kerberos")
except Exception:
pass
configuration.conf.set("kerberos",
"keytab",
os.environ['KRB5_KTNAME'])
self.app = application.create_app(testing=True)
def test_trigger_dag(self):
with self.app.test_client() as c:
url_template = '/api/experimental/dags/{}/dag_runs'
response = c.post(
url_template.format('example_bash_operator'),
data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
content_type="application/json"
)
self.assertEqual(401, response.status_code)
response.url = 'http://{}'.format(get_hostname())
class Request:
headers = {}
response.request = Request()
response.content = ''
response.raw = mock.MagicMock()
response.connection = mock.MagicMock()
response.connection.send = mock.MagicMock()
# disable mutual authentication for testing
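# (the value 3 is assumed here to correspond to the DISABLED constant of requests_kerberos, which backs CLIENT_AUTH)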
CLIENT_AUTH.mutual_authentication = 3
# case can influence the results
CLIENT_AUTH.hostname_override = get_hostname()
CLIENT_AUTH.handle_response(response)
self.assertIn('Authorization', response.request.headers)
response2 = c.post(
url_template.format('example_bash_operator'),
data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
content_type="application/json",
headers=response.request.headers
)
self.assertEqual(200, response2.status_code)
def test_unauthorized(self):
with self.app.test_client() as c:
url_template = '/api/experimental/dags/{}/dag_runs'
response = c.post(
url_template.format('example_bash_operator'),
data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
content_type="application/json"
)
self.assertEqual(401, response.status_code)
| 36.728155
| 84
| 0.628337
|
042a702908fec92602748874914051af2d3969fc
| 1,424
|
py
|
Python
|
examples/sdk_examples/build_index.py
|
AlexeyPichugin/lunasdk
|
e1e3cb053c0969abf6f5419617654372ac89f61b
|
[
"MIT"
] | 8
|
2019-04-17T06:50:43.000Z
|
2022-02-09T07:54:28.000Z
|
examples/sdk_examples/build_index.py
|
AlexeyPichugin/lunasdk
|
e1e3cb053c0969abf6f5419617654372ac89f61b
|
[
"MIT"
] | 71
|
2019-04-17T06:50:48.000Z
|
2022-03-22T22:12:43.000Z
|
examples/sdk_examples/build_index.py
|
AlexeyPichugin/lunasdk
|
e1e3cb053c0969abf6f5419617654372ac89f61b
|
[
"MIT"
] | 7
|
2019-10-14T07:13:29.000Z
|
2022-03-24T08:00:36.000Z
|
"""Module realize simple examples following features:
* build index with descriptors
* search for descriptors with the shorter distance to passed descriptor
"""
import pprint
from lunavl.sdk.faceengine.engine import VLFaceEngine
from lunavl.sdk.faceengine.setting_provider import DetectorType
from lunavl.sdk.image_utils.image import VLImage
from resources import EXAMPLE_O, EXAMPLE_1
def buildDescriptorIndex():
"""
Build index and search.
"""
faceEngine = VLFaceEngine()
detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
warper = faceEngine.createFaceWarper()
extractor = faceEngine.createFaceDescriptorEstimator()
descriptorsBatch = faceEngine.createFaceDescriptorFactory().generateDescriptorsBatch(2)
for image in (EXAMPLE_O, EXAMPLE_1):
vlImage = VLImage.load(filename=image)
faceDetection = detector.detectOne(vlImage)
warp = warper.warp(faceDetection)
faceDescriptor = extractor.estimate(warp.warpedImage)
descriptorsBatch.append(faceDescriptor)
indexBuilder = faceEngine.createIndexBuilder()
indexBuilder.appendBatch(descriptorsBatch)
pprint.pprint(f"index buf size: {indexBuilder.bufSize}")
index = indexBuilder.buildIndex()
pprint.pprint(index[0])
result = index.search(faceDescriptor, 1)
pprint.pprint(f"result: {result}")
if __name__ == "__main__":
buildDescriptorIndex()
| 34.731707
| 91
| 0.753511
|
a35c22e4b16f5f6384bda48c42f6e8c0f20fde79
| 5,281
|
py
|
Python
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/armada_base_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 12
|
2018-05-18T18:59:23.000Z
|
2019-05-10T12:31:44.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/armada_base_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 4
|
2021-07-28T14:36:57.000Z
|
2022-03-22T16:39:23.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/armada_base_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 9
|
2018-05-18T16:42:41.000Z
|
2019-04-18T20:12:14.000Z
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urllib.parse import urlparse
from airflow.exceptions import AirflowException
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
import armada.common.client as client
import armada.common.session as session
from armada.exceptions import api_exceptions as errors
try:
import service_endpoint
from service_token import shipyard_service_token
from ucp_base_operator import UcpBaseOperator
from xcom_pusher import XcomPusher
except ImportError:
from shipyard_airflow.plugins import service_endpoint
from shipyard_airflow.plugins.service_token import shipyard_service_token
from shipyard_airflow.plugins.ucp_base_operator import UcpBaseOperator
from shipyard_airflow.plugins.xcom_pusher import XcomPusher
LOG = logging.getLogger(__name__)
class ArmadaBaseOperator(UcpBaseOperator):
"""Armada Base Operator
All armada related workflow operators will use the armada
base operator as the parent and inherit attributes and methods
from this class
"""
@apply_defaults
def __init__(self,
query={},
svc_session=None,
svc_token=None,
*args, **kwargs):
"""Initialization of ArmadaBaseOperator object.
:param query: A dictionary containing explicit query string parameters
:param svc_session: Keystone Session
:param svc_token: Keystone Token
The Armada operator assumes that prior steps have set xcoms for
the action and the deployment configuration
"""
super(ArmadaBaseOperator,
self).__init__(
pod_selector_pattern=[{'pod_pattern': 'armada-api',
'container': 'armada-api'}],
*args, **kwargs)
self.query = query
self.svc_session = svc_session
self.svc_token = svc_token
@shipyard_service_token
def run_base(self, context):
# Set up xcom_pusher to push values to xcom
self.xcom_pusher = XcomPusher(self.task_instance)
# Logs uuid of action performed by the Operator
LOG.info("Armada Operator for action %s", self.action_id)
# Set up armada client
self.armada_client = self._init_armada_client(
self.endpoints.endpoint_by_name(service_endpoint.ARMADA),
self.svc_token,
self.context_marker,
self.user
)
@staticmethod
def _init_armada_client(armada_svc_endpoint, svc_token,
ext_marker, end_user):
LOG.info("Armada endpoint is %s", armada_svc_endpoint)
# Parse Armada Service Endpoint
armada_url = urlparse(armada_svc_endpoint)
# Build a ArmadaSession with credentials and target host
# information.
LOG.info("Build Armada Session")
a_session = session.ArmadaSession(host=armada_url.hostname,
port=armada_url.port,
scheme='http',
token=svc_token,
marker=ext_marker,
end_user=end_user)
# Raise Exception if we are not able to set up the session
if a_session:
LOG.info("Successfully Set Up Armada Session")
else:
raise AirflowException("Failed to set up Armada Session!")
# Use the ArmadaSession to build a ArmadaClient that can
# be used to make one or more API calls
LOG.info("Create Armada Client")
_armada_client = client.ArmadaClient(a_session)
# Raise Exception if we are not able to build armada client
if _armada_client:
LOG.info("Successfully Set Up Armada client")
return _armada_client
else:
raise AirflowException("Failed to set up Armada client!")
def get_releases(self):
"""Retrieve all deployed releases"""
try:
get_releases_resp = self.armada_client.get_releases(
query=self.query,
timeout=self.dc['armada.get_releases_timeout']
)
return get_releases_resp['releases']
except errors.ClientError as client_error:
# Dump logs from Armada pods
self.get_k8s_logs()
raise AirflowException(client_error)
class ArmadaBaseOperatorPlugin(AirflowPlugin):
"""Creates ArmadaBaseOperator in Airflow."""
name = 'armada_base_operator_plugin'
operators = [ArmadaBaseOperator]
| 34.97351
| 78
| 0.655558
|
a2eb5ba2c495078f635d851ac80752120c307e91
| 21,926
|
py
|
Python
|
src/mobile_control/mobile_4wis_4wid_hoffman_path_tracking_without_servo.py
|
Musyue/mobile_robot
|
ed8a75c41e8ccaf1b48639239e4119bf2d1d5f65
|
[
"MIT"
] | 6
|
2019-08-16T07:29:21.000Z
|
2021-12-27T01:25:35.000Z
|
src/mobile_control/mobile_4wis_4wid_hoffman_path_tracking_without_servo.py
|
Musyue/mobile_robot
|
ed8a75c41e8ccaf1b48639239e4119bf2d1d5f65
|
[
"MIT"
] | null | null | null |
src/mobile_control/mobile_4wis_4wid_hoffman_path_tracking_without_servo.py
|
Musyue/mobile_robot
|
ed8a75c41e8ccaf1b48639239e4119bf2d1d5f65
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
import rospy
import sys
from std_msgs.msg import String,Float64,Bool,Int64MultiArray
from sensor_msgs.msg import Imu
import time
from math import *
import numpy as np
from mobile_control.mobileplatform_driver_steptech import *
from geometry_msgs.msg import Twist,Pose
from scipy.io import loadmat
import tf2_ros
from nav_msgs.msg import Path
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# import matplotlib.pyplot as plt
class AGV4WDICONTROLLER():
def __init__(self):
self.mpfh=MobilePlatformDriver()
rospy.init_node("imu_data_for_mobileplatform")
self.wheel_R=0.15/2#m
self.car_length=0.5
self.car_width=0.395
self.imu_sub=rospy.Subscriber('/imu_data',Imu,self.Imu_callback)
self.cmd_vel_sub=rospy.Subscriber('/cmd_vel',Twist,self.CmdVel_callback)
self.path_sub=rospy.Subscriber('/mobile_base_path',Path,self.PathTarget_callback)
self.ImuOrientation=()
self.ImuAngularvelocity=()
self.ImuLinearAcceleration=()
self.ImuOrientationCovariance=[]
self.ImuAngularvelocityCovariance=[]
self.ImuLinearAccelerationCovariance=[]
self.linear_x=0.00001
self.linear_y=0
self.linear_z=0
self.angular_x=0
self.angular_y=0
self.angular_z=0.00001
self.speed_rotation=[]
self.odemetry_x=0.0#self.trans.transform.translation.x
self.odemetry_y=0.0#self.trans.transform.translation.y
self.odemetry_pha=0.0#3.14
self.odemetry_beta=0.0
self.vel_reference=0.05#1.5#0.5
self.reference_x=0
self.reference_y=0
self.reference_pha=0
self.reference_beta=0
####hoffamn
self.tfBuffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tfBuffer)
self.tranformtfs="map"
self.tranformtft='base_link'
# self.trans = self.tfBuffer.lookup_transform(self.tranformtfs, self.tranformtft, rospy.Time())
self.pose_x=0.0
self.pose_y=0.
self.pose_z=0.
self.pose_quaternion_x=0.
self.pose_quaternion_y=0.
self.pose_quaternion_z=0.0
self.pose_quaternion_w=0.0
self.roll=0
self.pitch=0
self.yaw=0
self.st=tan(0)
self.phi=0#gama
self.index_ref=0
self.phaRdot=0.08
self.betaRdot=0
self.kk=0.1
self.limit_steer_rad=10
self.path_all=[]
self.read_path=loadmat('/data/ros/yue_wk_2019/src/mobile_robot/src/mobile_control/circle_shape_path_2.mat')#figure_eight_path.mat')
# self.pub_vstar=rospy.Publisher("/vstar",Float64,queue_size=10)
self.pub_xr=rospy.Publisher("/xr",Float64,queue_size=10)
self.pub_yr=rospy.Publisher("/yr",Float64,queue_size=10)
self.pub_target_pha=rospy.Publisher("/target_pha",Float64,queue_size=10)
self.pub_x=rospy.Publisher("/x",Float64,queue_size=10)
self.pub_y=rospy.Publisher("/y",Float64,queue_size=10)
self.pub_real_pha=rospy.Publisher("/real_pha",Float64,queue_size=10)
self.pub_Vfl=rospy.Publisher("/vfl",Float64,queue_size=10)
self.pub_Vfr=rospy.Publisher("/vfr",Float64,queue_size=10)
self.pub_Vrl=rospy.Publisher("/vrl",Float64,queue_size=10)
self.pub_Vrr=rospy.Publisher("/vrr",Float64,queue_size=10)
self.pub_angular_error=rospy.Publisher("/angular_phi",Float64,queue_size=10)
self.pub_angular_e=rospy.Publisher("/angular_e",Float64,queue_size=10)
self.pub_error=rospy.Publisher("/distance_error",Float64,queue_size=10)
self.pub_detafl=rospy.Publisher("/detafl",Float64,queue_size=10)
self.pub_detafr=rospy.Publisher("/detafr",Float64,queue_size=10)
self.pub_detarl=rospy.Publisher("/detarl",Float64,queue_size=10)
self.pub_detarr=rospy.Publisher("/detarr",Float64,queue_size=10)
self.pub_detafl_initial=rospy.Publisher("/detafli",Float64,queue_size=10)
self.pub_detafr_initial=rospy.Publisher("/detafri",Float64,queue_size=10)
# Vfl,Vfr,Vrl,Vrr,detafl,detafr,detarl,detarr
self.target_path=[]
self.homing_original_position=[self.mpfh.Driver_steer_encode_fl_original,self.mpfh.Driver_steer_encode_fr_original,self.mpfh.Driver_steer_encode_rl_original,self.mpfh.Driver_steer_encode_rr_original]
def CmdVel_callback(self,msg):
# print "msg",msg.linear.x
self.linear_x=msg.linear.x
self.linear_y=msg.linear.y
self.linear_z=msg.linear.z
self.angular_x=msg.angular.x
self.angular_y=msg.angular.y
self.angular_z=msg.angular.z
# def PathTarget_callback(self,msg):
# self.pose_x=msg.pose.position.x
# self.pose_y=msg.pose.position.y
# self.pose_z=msg.pose.position.z
# self.pose_quaternion_x=msg.pose.orientation.x
# self.pose_quaternion_y=msg.pose.orientation.y
# self.pose_quaternion_z=msg.pose.orientation.z
# self.pose_quaternion_w=msg.pose.orientation.w
# orientation_list = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]
# (self.roll,self.pitch,self.yaw) = euler_from_quaternion (orientation_list)
def PathTarget_callback(self,msg):
for i in range(len(msg.poses)):
self.pose_x=msg.poses[i].pose.position.x
self.pose_y=msg.poses[i].pose.position.y
self.pose_z=msg.poses[i].pose.position.z
self.pose_quaternion_x=msg.poses[i].pose.orientation.x
self.pose_quaternion_y=msg.poses[i].pose.orientation.y
self.pose_quaternion_z=msg.poses[i].pose.orientation.z
self.pose_quaternion_w=msg.poses[i].pose.orientation.w
orientation_list = [self.pose_quaternion_x, self.pose_quaternion_y, self.pose_quaternion_z,self.pose_quaternion_w]
(self.roll,self.pitch,self.yaw) = euler_from_quaternion (orientation_list)
self.path_all.append([self.pose_x,self.pose_y,self.yaw])
print("-------path---data----",[self.pose_x,self.pose_y,self.pose_z,self.roll,self.pitch,self.yaw])
def Avage_list(self,listdata,appendata):
if len(listdata)>10:
listdata=listdata[1:]
listdata.append(appendata)
else:
listdata.append(appendata)
return listdata
def Init_Node(self):
self.mpfh.Init_can()
self.mpfh.Open_driver_can_Node(0x00000000,1)
self.mpfh.Clear_all_error_without_disable_driver()
self.mpfh.Enable_Motor_Controller_All()
self.mpfh.Send_trapezoid_Velocity(2500)
def Imu_callback(self,msg):
self.ImuOrientation=(msg.orientation.x,msg.orientation.y,msg.orientation.z,msg.orientation.w)
self.ImuAngularvelocity=(msg.angular_velocity.x,msg.angular_velocity.y,msg.angular_velocity.z)
self.ImuLinearAcceleration=(msg.linear_acceleration.x,msg.linear_acceleration.y,msg.linear_acceleration.z)
self.ImuOrientationCovariance=msg.orientation_covariance
self.ImuAngularvelocityCovariance=msg.angular_velocity_covariance
self.ImuLinearAccelerationCovariance=msg.linear_acceleration_covariance
def set_pdemetry_vel(self,vel):
self.odemetry_vel=vel
def set_pdemetry_x(self,x):
self.odemetry_x=x
def set_pdemetry_y(self,y):
self.odemetry_y=y
def set_pdemetry_theta(self,theta):
self.odemetry_theta=theta
def andiff(self,th1,th2):
d=th1-th2
print("----d------",d)
return d#self.mod_function(d+pi, 2*pi) - pi
def Caculate_velocity_from_angular_z(self,angular_velocity_z,gamma_rad):
vel=(angular_velocity_z*self.car_length)/tan(gamma_rad)
return vel
def Caculate_velocity_from_RPM(self):
RPM_fl=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_fl)
RPM_fr=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_fr)
RPM_rl=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_rl)
RPM_rr=self.mpfh.Dec_to_RPM(self.mpfh.Driver_walk_velocity_encode_rr)
print("RPM_fl",RPM_fl,RPM_fr,RPM_rl,RPM_rr)
if 0 not in [RPM_fl,RPM_fr,RPM_rl,RPM_rr]:
Velocity=[(RPM_fl*2*pi*self.wheel_R)/60.0,(RPM_fr*2*pi*self.wheel_R)/60.0,(RPM_rl*2*pi*self.wheel_R)/60.0,(RPM_rr*2*pi*self.wheel_R)/60.0]
print("------Velocity---------",Velocity)#self.mpfh.Driver_walk_velocity_encode_fl
return Velocity
else:
Velocity=[(RPM_fl*2*pi*self.wheel_R)/60.0,(RPM_fr*2*pi*self.wheel_R)/60.0,(RPM_rl*2*pi*self.wheel_R)/60.0,(RPM_rr*2*pi*self.wheel_R)/60.0]
print("----some zero in list for velocity---",Velocity)
return [-1.0*self.vel_reference,1.0*self.vel_reference,-1.0*self.vel_reference,1.0*self.vel_reference]
# return [-0.50*self.vel_reference,0.50*self.vel_reference,-0.50*self.vel_reference,0.50*self.vel_reference]
# print "there are velocity error in encode"
def Caculate_rad_from_position_data(self):
detafi=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_fl-self.mpfh.Driver_steer_encode_fl_original)
detafo=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_fr-self.mpfh.Driver_steer_encode_fr_original)
detari=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_rl-self.mpfh.Driver_steer_encode_rl_original)
detaro=self.mpfh.Pos_to_rad(self.mpfh.Driver_steer_encode_rr-self.mpfh.Driver_steer_encode_rr_original)
print("self.mpfh.Driver_steer_encode_fl",self.mpfh.Driver_steer_encode_fl,self.mpfh.Driver_steer_encode_fr,self.mpfh.Driver_steer_encode_rl,self.mpfh.Driver_steer_encode_rr)
return [detafi,detafo,detari,detaro]
def caculate_bicycle_model_thetafr_re(self,VA,phadot):
# print self.linear_x,self.angular_z
thetafr=atan((phadot*self.car_length)/(2.0*VA))
thetare=atan(-tan(thetafr))
return [thetafr,thetare]
def my_arccot(self,x):
"""
Using pi/2 - atan(x) to calculate arccot
"""
return pi/2-atan(x)
# if x>0:
# return atan(1/x)+pi
# elif x<0:
# return atan(1/x)
# else:
# return 0.0
def Set_rad_in_halfpi(self,rad):
"""
Make rad in 0-pi/2
"""
if rad<pi/2.0:
return rad
else:
return rad-pi
# return (rad+pi/2.0)%(pi/2.0) - pi/2.0
def Set_rad_in_pi(self,rad):
return (rad+pi)%(pi*2.0) - pi
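# Added note: Set_rad_in_pi wraps an angle into [-pi, pi); e.g. 3*pi/2 maps to -pi/2.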
def sign(self,x):
if x!=0.0:
return x/abs(x)
else:
return 0.0
def caculate_pos_negtive_phi(self,target,realpoint):
if abs(target-realpoint)>pi:
if target<0:
return target-realpoint+2*pi
elif target>0:
return target+realpoint+2*pi
else:
return target-realpoint
else:
return target-realpoint
def hoffman_control_tf2(self,point_ref_all,odemetry_x,odemetry_y,odemetry_pha):
self.odemetry_x=odemetry_x#self.odemetry_x+self.vel_reference*cos(self.odemetry_pha)*dt
self.odemetry_y=odemetry_y#self.odemetry_y+self.vel_reference*sin(self.odemetry_pha)*dt
self.odemetry_pha=odemetry_pha#self.odemetry_pha+self.vel_reference*(1.0/self.car_length)*self.st*dt
self.odemetry_pha=self.Set_rad_in_pi(self.odemetry_pha)
e=sqrt((self.odemetry_x-point_ref_all[0][0])**2+(self.odemetry_y-point_ref_all[0][1])**2)
for i in range(1,len(point_ref_all)):
etmp=sqrt((self.odemetry_x-point_ref_all[i][0])**2+(self.odemetry_y-point_ref_all[i][1])**2)
if etmp<e:
e=etmp
self.index_ref=i
print("self.index_ref",self.index_ref)
point_ref=point_ref_all[self.index_ref]
ex1=point_ref[0]-self.odemetry_x
ey1=point_ref[1]-self.odemetry_y
self.pub_xr.publish(point_ref[0])
self.pub_yr.publish(point_ref[1])
self.pub_target_pha.publish(point_ref[2])
ex2=cos(point_ref[2])
ey2=sin(point_ref[2])
sinnn=-1*(ex1*ey2-ey1*ex2)
e=self.sign(sinnn)*e
self.pub_angular_e.publish(e)
# limit_degree=30.0
# temp_phi=point_ref[2]- self.odemetry_pha+atan(self.kk*e/self.vel_reference)
# if abs(temp_phi)>(limit_degree*pi/180):
# if temp_phi>0.0:
# self.phi=limit_degree*pi/180
# elif temp_phi<0.0:
# self.phi=-limit_degree*pi/180
# else:
# self.phi=0.0
# else:
# self.phi=temp_phi
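# Added note: the line below appears to be the Hoffmann/Stanley steering law,
# steer = heading error + atan(k * cross-track error / speed).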
self.phi=point_ref[2]- self.odemetry_pha+atan(self.kk*e/self.vel_reference)
# self.phi=self.caculate_pos_negtive_phi(point_ref[2],self.odemetry_pha)+atan(self.kk*e/self.vel_reference)
self.st=tan(self.phi)
if abs(tan(self.st))>=self.limit_steer_rad:
self.st=self.limit_steer_rad*self.sign(self.st)
else:
self.st=self.st
self.phi=atan(self.st)
self.pub_angular_error.publish(self.phi)
def target_distance_error(self,x,y):
e=sqrt((self.path_all[-1][0]-x)**2+(self.path_all[-1][1]-y)**2)
self.pub_error.publish(e)
if e<0.15:
print("distance in line error----")
return True
else:
return False
def set_array_to_list(self,array_num):
newtemp=[]
for i in range(len(array_num)):
newtemp.append(list(array_num[i]))
return newtemp
def hoffman_kinematic_model(self,VC,phi_ref):
Vfl=VC*sqrt(self.car_length**2+(self.car_length/tan(phi_ref)+self.car_width/2.0)**2)/(self.car_length/tan(phi_ref))
Vfr=VC*sqrt(self.car_length**2+(self.car_length/tan(phi_ref)-self.car_width/2.0)**2)/(self.car_length/tan(phi_ref))
Vrl=VC*(self.car_length/tan(phi_ref)+self.car_width/2.0)/(self.car_length/tan(phi_ref))
Vrr=VC*(self.car_length/tan(phi_ref)-self.car_width/2.0)/(self.car_length/tan(phi_ref))
detarl=0.0
detarr=0.0
detafl=atan(self.car_length/(self.car_length/tan(phi_ref)-self.car_width/2.0))
detafr=atan(self.car_length/(self.car_length/tan(phi_ref)+self.car_width/2.0))
self.pub_Vfl.publish(Vfl)
self.pub_Vfr.publish(Vfr)
self.pub_Vrl.publish(Vrl)
self.pub_Vrr.publish(Vrr)
self.pub_detafl.publish(detafl)
self.pub_detafr.publish(detafr)
return [Vfl,Vfr,Vrl,Vrr,detafl,detafr,detarl,detarr]
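# Added note (geometry inferred from the formulas, stated as an assumption): the
# variant below seems to take 0.5*car_length/tan(phi_ref) as the turning radius of a
# centre-referenced bicycle model, scale each wheel speed by that wheel's distance
# from the turning centre, and mirror the front steering angles on the rear axle
# for 4-wheel steering.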
def hoffman_kinematic_model_new(self,VC,phi_ref):
Vfl=VC*sqrt((0.5*self.car_length)**2+(0.5*self.car_length/tan(phi_ref)-self.car_width/2.0)**2)/(0.5*self.car_length/tan(phi_ref))
Vfr=VC*sqrt((0.5*self.car_length)**2+(0.5*self.car_length/tan(phi_ref)+self.car_width/2.0)**2)/(0.5*self.car_length/tan(phi_ref))#VC*sqrt(self.car_length**2+(self.car_length/tan(phi_ref)-self.car_width/2.0)**2)/(self.car_length/tan(phi_ref))
Vrl=Vfl#VC*(self.car_length/tan(phi_ref)+self.car_width/2.0)/(self.car_length/tan(phi_ref))
Vrr=Vfr#VC*(self.car_length/tan(phi_ref)-self.car_width/2.0)/(self.car_length/tan(phi_ref))
detafl=atan(0.5*self.car_length/(0.5*self.car_length/tan(phi_ref)-self.car_width/2.0))
detafr=atan(0.5*self.car_length/(0.5*self.car_length/tan(phi_ref)+self.car_width/2.0))
self.pub_detafl_initial.publish(detafl)
self.pub_detafr_initial.publish(detafr)
limit_degree=30.0
if abs(detafl)>(limit_degree*pi/180):
if detafl>0.0:
detafl=limit_degree*pi/180
elif detafl<0.0:
detafl=-limit_degree*pi/180
else:
detafl=0.0
else:
detafl=detafl
if abs(detafr)>(limit_degree*pi/180):
if detafr>0.0:
detafr=limit_degree*pi/180
elif detafr<0.0:
detafr=-limit_degree*pi/180
else:
detafr=0.0
else:
detafr=detafr
detarl=-detafl
detarr=-detafr
self.pub_Vfl.publish(Vfl)
self.pub_Vfr.publish(Vfr)
self.pub_Vrl.publish(Vrl)
self.pub_Vrr.publish(Vrr)
self.pub_detafl.publish(detafl)
self.pub_detafr.publish(detafr)
self.pub_detarl.publish(detarl)
self.pub_detarr.publish(detarr)
return [Vfl,Vfr,Vrl,Vrr,detafl,detafr,detarl,detarr]
def main():
agvobj=AGV4WDICONTROLLER()
agvobj.Init_Node()
time.sleep(3)
ratet=1
rate=rospy.Rate(ratet)
zerotime=time.time()
dt=0
flg=0
count=1
xr=[]
yr=[]
x=[]
y=[]
# plt.ion()  # enable interactive mode, the key call for live plotting
# plt.figure(1)
# agvobj.add_target()
# plt.show()
# postion_list
speedfl=0
speedfr=0
speedrl=0
speedrr=0
flagg=1
flaggg=1
pathfilename='path'#'pathsmallCirclexythera'
while not rospy.is_shutdown():
if len(agvobj.path_all)!=0:
try:
trans = agvobj.tfBuffer.lookup_transform('map', 'base_link', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print("tf2 have nothing-----")
rate.sleep()
continue
recevenum=agvobj.mpfh.CanAnalysis.Can_GetReceiveNum(0)
starttime=time.time()
print("recevenum----",recevenum)
if recevenum!=None and flagg==1:
if flg==0:
agvobj.mpfh.Send_same_velocity_to_four_walking_wheel([-1.0,1.0,-1.0,1.0],1,agvobj.vel_reference)
# time.sleep(0.5)
# agvobj.mpfh.Send_position_to_four_steering_wheel(agvobj.homing_original_position)
flg=1
agvobj.mpfh.Read_sensor_data_from_driver()
odemetry_xx=trans.transform.translation.x
odemetry_yy=trans.transform.translation.y
print("odemetry_xx,odemetry_yy",odemetry_xx,odemetry_yy)
ratation_quaternion=[trans.transform.rotation.x,trans.transform.rotation.y,trans.transform.rotation.z,trans.transform.rotation.w]
rr,pp,yy=euler_from_quaternion(ratation_quaternion)
print("mobile real -----rpy----",rr,pp,yy)
odemetry_pha_pha=yy
# pathreference=agvobj.set_array_to_list(agvobj.read_path[pathfilename])
# print "pathreference",pathreference,len(pathreference)
agvobj.hoffman_control_tf2(agvobj.path_all,odemetry_xx,odemetry_yy,odemetry_pha_pha)
if agvobj.target_distance_error(odemetry_xx,odemetry_yy):
print("------walking wheel------")
agvobj.mpfh.Send_same_velocity_to_four_walking_wheel([-1.0,1.0,-1.0,1.0],1,0.0)
time.sleep(1)
flagg=0
velocity_real_time=agvobj.Caculate_velocity_from_RPM()
if len(velocity_real_time)!=0:
rad_real_time=agvobj.Caculate_rad_from_position_data()
print("velocity_real_time",velocity_real_time)
print("rad_real_time",rad_real_time)
Vfi=velocity_real_time[0]
Vfo=velocity_real_time[1]
Vri=velocity_real_time[2]
Vro=velocity_real_time[3]
detafi=rad_real_time[0]
detafo=rad_real_time[1]
detari=rad_real_time[2]
detaro=rad_real_time[3]
# VC=(agvobj.sign(Vri)*Vri+agvobj.sign(Vro)*Vro)/2.
VC=agvobj.vel_reference
print("VC-------",VC)
v_deta=agvobj.hoffman_kinematic_model_new(VC,agvobj.phi)
print("four velocity and four steer---",v_deta)
wheel_diretion_flg=[-1.0,1.0,-1.0,1.0]
wheel_diretion_flg1=[1.0,1.0,1.0,1.0]
speed_flag=[1.0,-1.0,-1.0,-1.0]
speedfl=wheel_diretion_flg1[0]*abs(v_deta[0])
speedfr=wheel_diretion_flg1[1]*abs(v_deta[1])
speedrl=wheel_diretion_flg1[2]*abs(v_deta[2])
speedrr=wheel_diretion_flg1[3]*abs(v_deta[3])
print("speedfl,speedfr,speedrl,speedrr",speedfl,speedfr,speedrl,speedrr)
agvobj.mpfh.Send_diff_velocity_to_four_walking_wheel(wheel_diretion_flg,speed_flag,speedfl,speedfr,speedrl,speedrr)
ratation_flag=[-1.0,-1.0,-1.0,-1.0]
four_steer_degree_theta=v_deta[4:]
agvobj.mpfh.Send_diff_degree_position_to_four_steering_wheel(ratation_flag,four_steer_degree_theta)
agvobj.pub_x.publish(agvobj.odemetry_x)
agvobj.pub_y.publish(agvobj.odemetry_y)
agvobj.pub_real_pha.publish(agvobj.odemetry_pha)
count+=1
print("count",count)
else:
if flagg==0:
agvobj.mpfh.Send_same_velocity_to_four_walking_wheel([-1.0,1.0,-1.0,1.0],1,0.0)
agvobj.mpfh.Send_diff_degree_position_to_four_steering_wheel(ratation_flag,[0.0,0.0,0.0,0.0])
agvobj.mpfh.Send_Control_Command(agvobj.mpfh.CanAnalysis.yamlDic['sync_data_ID'], agvobj.mpfh.MobileDriver_Command.ZERO_COMMAND)
print("---------read data----")
endtime=time.time()
dt=endtime-starttime
# agvobj.
else:
print("----wait path----",agvobj.path_all)
# continue
rate.sleep()
agvobj.mpfh.CanAnalysis.Can_VCICloseDevice()
agvobj.mpfh.Close_driver_can_Node(0x00000000,1)
if __name__=="__main__":
main()
| 47.051502
| 249
| 0.643939
|
ba759def51e7543ba88ae83a81f0fe0c0faff9f3
| 223
|
py
|
Python
|
centralized_pre_commit_conf/__init__.py
|
Pierre-Sassoulas/centralized-pre-commit-conf
|
84d4397a93157ccc2fc67f9eb8c9076787118ec2
|
[
"MIT"
] | 6
|
2020-05-25T07:11:53.000Z
|
2021-02-03T01:53:14.000Z
|
centralized_pre_commit_conf/__init__.py
|
Pierre-Sassoulas/centralized-pre-commit-conf
|
84d4397a93157ccc2fc67f9eb8c9076787118ec2
|
[
"MIT"
] | 1
|
2020-09-02T15:51:38.000Z
|
2020-09-02T15:51:38.000Z
|
centralized_pre_commit_conf/__init__.py
|
Pierre-Sassoulas/centralized-pre-commit-conf
|
84d4397a93157ccc2fc67f9eb8c9076787118ec2
|
[
"MIT"
] | 1
|
2020-07-22T08:03:45.000Z
|
2020-07-22T08:03:45.000Z
|
"""Easily install and update centralized pre-commit hooks and their configuration files in decentralized repositories"""
from centralized_pre_commit_conf.install import install
__version__ = "0.4.0"
__all__ = ["install"]
| 31.857143
| 120
| 0.802691
|
d93b4e5f84e92f155f767054c3803e1bc14aac3e
| 304
|
py
|
Python
|
wagtail_graphql/apps.py
|
yatesrates/wagtail-graphql-api
|
8183c3c69340c9a48e2c352fc398e8bd255efa96
|
[
"BSD-3-Clause"
] | 4
|
2019-07-19T21:17:02.000Z
|
2020-08-30T05:15:07.000Z
|
wagtail_graphql/apps.py
|
yatesrates/wagtail-graphql-api
|
8183c3c69340c9a48e2c352fc398e8bd255efa96
|
[
"BSD-3-Clause"
] | 1
|
2019-11-06T03:44:42.000Z
|
2019-11-06T03:44:42.000Z
|
wagtail_graphql/apps.py
|
yatesrates/wagtail-graphql-api
|
8183c3c69340c9a48e2c352fc398e8bd255efa96
|
[
"BSD-3-Clause"
] | 3
|
2019-07-19T21:17:06.000Z
|
2019-10-31T03:34:44.000Z
|
from django import apps
class WagtailGraphQLConfig(apps.AppConfig):
name = 'wagtail_graphql'
def ready(self):
from wagtail_graphql.checks import register_checks
from wagtail_graphql.converters import register_converters
register_checks()
register_converters()
| 23.384615
| 66
| 0.733553
|
7d520a5d4960904ead911e7662a85cf47660f676
| 8,215
|
py
|
Python
|
main.py
|
pastelmind/slack-post-grabber
|
ab83d53de62f2398130fdd995f42d00788ed9eec
|
[
"MIT"
] | null | null | null |
main.py
|
pastelmind/slack-post-grabber
|
ab83d53de62f2398130fdd995f42d00788ed9eec
|
[
"MIT"
] | 6
|
2019-07-17T09:53:19.000Z
|
2019-08-28T06:57:36.000Z
|
main.py
|
pastelmind/slack-message-inspector
|
ab83d53de62f2398130fdd995f42d00788ed9eec
|
[
"MIT"
] | null | null | null |
"""Slack bot that grabs the source of Slack posts."""
import hmac
import json
import os
from hashlib import sha256
from http import HTTPStatus
from sys import stderr
from time import time, perf_counter
from typing import Any, Dict, Tuple
import flask
from requests import Session
TIMEOUT_POST_MESSAGE = float(os.getenv('TIMEOUT_POST_MESSAGE') or 3)
TIMEOUT_GET_POST_SOURCE = float(os.getenv('TIMEOUT_GET_POST_SOURCE') or 3)
_session = Session()
# The following verification methods are based on:
# - https://api.slack.com/docs/verifying-requests-from-slack#step-by-step_walk-through_for_validating_a_request
# - https://github.com/slackapi/python-slack-events-api/blob/master/slackeventsapi/server.py
def _is_valid_timestamp(timestamp: str) -> bool:
"""Checks if the given timestamp is at most five minutes from local time."""
return abs(time() - int(timestamp)) <= 60 * 5
def _is_valid_request_body(request_body: bytes, timestamp: str, signature: str) -> bool:
"""Verifies the contents of a Slack request against a signature."""
signing_secret = os.environ['SLACK_SIGNING_SECRET']
req = str.encode(f'v0:{timestamp}:') + request_body
request_hash = hmac.new(str.encode(signing_secret), req, sha256)
return hmac.compare_digest('v0=' + request_hash.hexdigest(), signature)
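# Summary of the Slack v0 scheme the check above implements (see the links above):
# the expected signature is 'v0=' + hex(HMAC_SHA256(signing_secret, 'v0:<timestamp>:<raw body>'))
# and it must match the X-Slack-Signature header.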
def _is_valid_request(request: flask.Request) -> bool:
"""Verifies the timestamp and signature of an incoming Slack request."""
timestamp = request.headers.get('X-Slack-Request-Timestamp')
if not _is_valid_timestamp(timestamp):
# This could be a replay attack, so let's ignore it.
print('Invalid timestamp', file=stderr)
return False
signature = request.headers.get('X-Slack-Signature')
if not _is_valid_request_body(request.get_data(), timestamp, signature):
print('Invalid signature', file=stderr)
return False
return True
def _split_upto_newline(source: str, maxlen: int) -> Tuple[str, str]:
"""Splits a string in two, limiting the length of the first part.
Splits the given string in two, such that the first part (segment) contains
at most `maxlen` characters.
If the source string contains a line break ('\\n') at or before maxlen, the
string is split at the newline, and the newline character itself is not
included in either the segment or the remainder. This ensures that source code is
split as cleanly as possible.
Args:
source: String to split.
maxlen: Maximum number of characters allowed in the segment part.
Returns:
Tuple of (segment, remainder). If the source text has fewer characters
than maxlen, the remainder contains the empty string ('').
"""
assert maxlen >= 0, f'maxlen must be nonnegative (value={maxlen!r})'
split_current = split_next = maxlen
if len(source) > maxlen:
last_newline_pos = source.rfind('\n', 0, maxlen + 1)
if last_newline_pos != -1:
split_current = last_newline_pos
split_next = last_newline_pos + 1
return source[:split_current], source[split_next:]
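# Illustrative behaviour (examples added for clarity, checked against the code above):
#   _split_upto_newline("abc\ndef", 5) -> ("abc", "def")   # split at the newline, '\n' dropped
#   _split_upto_newline("abcdef", 4)   -> ("abcd", "ef")   # no newline in range, hard split
#   _split_upto_newline("ab", 10)      -> ("ab", "")       # fits entirely, empty remainder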
def _send_response(response_url: str, text: str) -> None:
"""Sends text in an ephemeral message to a response URL provided by Slack.
Args:
response_url: URL provided by a Slack interaction request.
text: Text to send
"""
payload = {'text': text, 'response_type': 'ephemeral'}
_session.post(response_url, json=payload, timeout=TIMEOUT_POST_MESSAGE)
def _send_source_message(response_url: str, heading: str, source: str) -> None:
"""Sends an ephemeral message containing the source of a message or post.
If the source string is too long to fit in a single message, it will be
split into up to 5 messages. Any remaining string after that is truncated.
Args:
response_url: URL provided by a Slack interaction request.
heading: Heading text displayed above the source text.
source: Source text of a Slack message or post.
"""
MAX_TEXT_LENGTH = 40000
boilerplate = f'{heading}:\n```{{source}}```'
boilerplate_length = len(boilerplate.format(source=''))
if len(source) <= MAX_TEXT_LENGTH - boilerplate_length:
text = boilerplate.format(source=source)
_send_response(response_url, text)
else:
boilerplate = f'{heading} ({{i}} of {{count}}):\n```{{source}}```'
boilerplate_length = len(boilerplate.format(i=0, count=0, source=''))
segments = []
while source and len(segments) < 5:
segment, source = _split_upto_newline(
source, MAX_TEXT_LENGTH - boilerplate_length
)
segments.append(segment)
for i, segment in enumerate(segments):
text = boilerplate.format(
i=i + 1, count=len(segments), source=segment
)
_send_response(response_url, text)
def _is_slack_post(file_info: dict) -> bool:
"""Checks if the file type is a valid Slack post."""
return file_info['filetype'] in ('post', 'space', 'docs')
def _on_view_message_source(message: Dict[str, Any], response_url: str) -> None:
"""Responds to a view_message_source request.
Args:
message: The original message, parsed as JSON.
response_url: URL provided by a Slack interaction request.
"""
counter_begin = perf_counter()
source = json.dumps(message, indent=2, ensure_ascii=False)
_send_source_message(response_url, 'Raw JSON of message', source)
time_spent = perf_counter() - counter_begin
print(f'view_message_source: Took {time_spent * 1000:.4f} ms')
def _on_view_post_source(message: Dict[str, Any], response_url: str) -> None:
"""Responds to a view_post_source request.
Args:
message: The original message, parsed as JSON.
response_url: URL provided by a Slack interaction request.
"""
counter_begin = perf_counter()
attached_files = message.get('files', [])
slack_post = next(filter(_is_slack_post, attached_files), None)
if slack_post:
token = os.environ['SLACK_OAUTH_TOKEN']
response = _session.get(
slack_post['url_private'],
headers={'Authorization': f'Bearer {token}'},
timeout=TIMEOUT_GET_POST_SOURCE,
)
post_json = response.json()
source = post_json.get('full')
if not source:
source = json.dumps(post_json, indent=2, ensure_ascii=False)
_send_source_message(response_url, 'Raw source of post', source)
else:
_send_response(response_url, 'Error: Not a Slack post')
time_spent = perf_counter() - counter_begin
print(f'view_post_source: Took {time_spent * 1000:.4f} ms')
def on_request(request: flask.Request) -> Any:
"""Handles an interaction event request sent by Slack.
Args:
request: The Flask Request object.
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
Returns:
Response text or object to be passed to `make_response()`.
<http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>
"""
if request.method != 'POST':
return 'Only POST requests are accepted', HTTPStatus.METHOD_NOT_ALLOWED
if not _is_valid_request(request):
return '', HTTPStatus.FORBIDDEN
# Interaction event data is sent as JSON in the `payload` parameter, using
# application/x-www-form-urlencoded format
payload_str = request.values['payload']
payload = json.loads(payload_str)
assert payload['type'] == 'message_action', (
f'Unexpected payload type received, see contents: {payload_str}'
)
# slackclient v2.1.0 does not provide a convenience class for message
# actions, so manually access the JSON fields
callback_id = payload['callback_id']
original_message = payload['message']
response_url = payload['response_url']
if callback_id == 'view_message_source':
_on_view_message_source(original_message, response_url)
elif callback_id == 'view_post_source':
_on_view_post_source(original_message, response_url)
else:
assert 0, f'Unexpected callback ID: {callback_id}'
return '', HTTPStatus.OK
| 36.838565
| 111
| 0.684601
|
0a69e30b6b4544c208ee3c53163643b768d36a98
| 834
|
py
|
Python
|
mysite/mysite/urls.py
|
marktiu7/demo
|
07df5d706b17b2d9a6275a96d803914b32ab1630
|
[
"Apache-2.0"
] | null | null | null |
mysite/mysite/urls.py
|
marktiu7/demo
|
07df5d706b17b2d9a6275a96d803914b32ab1630
|
[
"Apache-2.0"
] | null | null | null |
mysite/mysite/urls.py
|
marktiu7/demo
|
07df5d706b17b2d9a6275a96d803914b32ab1630
|
[
"Apache-2.0"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
urlpatterns +=[
url(r'^online/',include('online.urls')),
]
| 32.076923
| 79
| 0.697842
|
7b2c6034d61f77a06b46d687b6c0211a6f248a6b
| 4,678
|
py
|
Python
|
benchmarks/PETSc/hm1_1Dbeam/hm1_1Dbeam.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 3
|
2019-01-15T17:38:11.000Z
|
2020-01-07T23:44:12.000Z
|
benchmarks/PETSc/hm1_1Dbeam/hm1_1Dbeam.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 1
|
2020-05-12T09:18:09.000Z
|
2020-05-12T10:48:32.000Z
|
benchmarks/PETSc/hm1_1Dbeam/hm1_1Dbeam.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 1
|
2020-01-08T13:28:50.000Z
|
2020-01-08T13:28:50.000Z
|
# -*- coding: utf-8 -*-
from ogs5py import OGS
model = OGS(
task_root='hm1_1Dbeam_root',
task_id='hm1_1Dbeam',
output_dir='out',
)
model.msh.read_file('hm1_1Dbeam.msh')
model.gli.read_file('hm1_1Dbeam.gli')
model.pcs.add_block(
main_key='PROCESS',
PCS_TYPE='LIQUID_FLOW',
ELEMENT_MATRIX_OUTPUT=0,
)
model.pcs.add_block(
main_key='PROCESS',
PCS_TYPE='DEFORMATION',
ELEMENT_MATRIX_OUTPUT=0,
)
model.rfd.read_file('hm1_1Dbeam.rfd')
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='DEFORMATION',
PRIMARY_VARIABLE='DISPLACEMENT_X1',
GEO_TYPE=['SURFACE', 'SURFACE1'],
DIS_TYPE=[
['LINEAR', 4],
[0, 0.0],
[1, 0.0],
[2, 0.0],
[3, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='DEFORMATION',
PRIMARY_VARIABLE='DISPLACEMENT_Y1',
GEO_TYPE=['SURFACE', 'SURFACE2'],
DIS_TYPE=[
['LINEAR', 4],
[4, 0.0],
[5, 0.0],
[6, 0.0],
[7, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='DEFORMATION',
PRIMARY_VARIABLE='DISPLACEMENT_Y1',
GEO_TYPE=['SURFACE', 'SURFACE3'],
DIS_TYPE=[
['LINEAR', 4],
[8, 0.0],
[9, 0.0],
[10, 0.0],
[11, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='DEFORMATION',
PRIMARY_VARIABLE='DISPLACEMENT_Z1',
GEO_TYPE=['SURFACE', 'SURFACE4'],
DIS_TYPE=[
['LINEAR', 4],
[12, 0.0],
[13, 0.0],
[14, 0.0],
[15, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='DEFORMATION',
PRIMARY_VARIABLE='DISPLACEMENT_Z1',
GEO_TYPE=['SURFACE', 'SURFACE5'],
DIS_TYPE=[
['LINEAR', 4],
[16, 0.0],
[17, 0.0],
[18, 0.0],
[19, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='LIQUID_FLOW',
PRIMARY_VARIABLE='PRESSURE1',
GEO_TYPE=['SURFACE', 'SURFACE6'],
DIS_TYPE=[
['LINEAR', 4],
[20, 0.0],
[21, 0.0],
[22, 0.0],
[23, 0.0],
],
TIM_TYPE=['CURVE', 1],
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='LIQUID_FLOW',
PRIMARY_VARIABLE='PRESSURE1',
GEO_TYPE=['SURFACE', 'SURFACE7'],
DIS_TYPE=[
['LINEAR', 4],
[24, 1000000.0],
[25, 1000000.0],
[26, 1000000.0],
[27, 1000000.0],
],
TIM_TYPE=['CURVE', 1],
)
model.ic.add_block(
main_key='INITIAL_CONDITION',
PCS_TYPE='LIQUID_FLOW',
PRIMARY_VARIABLE='PRESSURE1',
GEO_TYPE='DOMAIN',
DIS_TYPE=['CONSTANT', 500000.0],
)
model.mmp.add_block(
main_key='MEDIUM_PROPERTIES',
GEOMETRY_DIMENSION=3,
POROSITY=[1, 0.0],
PERMEABILITY_TENSOR=['ISOTROPIC', 1e-12],
)
model.msp.add_block(
main_key='SOLID_PROPERTIES',
DENSITY=[1, 0.0],
ELASTICITY=[
['POISSION', 0.25],
['YOUNGS_MODULUS'],
[1, 25000000000.0],
],
BIOT_CONSTANT=1.0,
)
model.mfp.add_block(
main_key='FLUID_PROPERTIES',
FLUID_TYPE='LIQUID',
PCS_TYPE='PRESSURE1',
DENSITY=[1, 0.0],
VISCOSITY=[1, 0.001],
)
model.num.add_block(
main_key='NUMERICS',
PCS_TYPE='LIQUID_FLOW',
LINEAR_SOLVER=['petsc', 'bcgs', 'bjacobi', 1e-20, 10000],
)
model.num.add_block(
main_key='NUMERICS',
PCS_TYPE='DEFORMATION',
LINEAR_SOLVER=['petsc', 'bcgs', 'bjacobi', 1e-10, 10000],
)
model.tim.add_block(
main_key='TIME_STEPPING',
PCS_TYPE='LIQUID_FLOW',
TIME_UNIT='SECOND',
TIME_STEPS=[2, 1.0],
TIME_END=1.0,
TIME_START=0.0,
)
model.tim.add_block(
main_key='TIME_STEPPING',
PCS_TYPE='DEFORMATION',
TIME_UNIT='SECOND',
TIME_STEPS=[2, 1.0],
TIME_END=1.0,
TIME_START=0.0,
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='LIQUID_FLOW',
NOD_VALUES='PRESSURE1',
GEO_TYPE='DOMAIN',
DAT_TYPE='TECPLOT',
TIM_TYPE=1,
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='DEFORMATION',
NOD_VALUES=[
['DISPLACEMENT_X1'],
['DISPLACEMENT_Y1'],
['DISPLACEMENT_Z1'],
['STRESS_XX'],
['STRESS_XY'],
['STRESS_YY'],
['STRESS_ZZ'],
['STRESS_XZ'],
['STRESS_YZ'],
['STRAIN_XX'],
['STRAIN_XY'],
['STRAIN_YY'],
['STRAIN_ZZ'],
['STRAIN_XZ'],
['STRAIN_YZ'],
],
GEO_TYPE='DOMAIN',
DAT_TYPE='TECPLOT',
TIM_TYPE=1,
)
model.write_input()
model.run_model()
| 22.27619
| 61
| 0.570543
|
551db97944b1ef8364c49959e4edb9460495c838
| 1,992
|
py
|
Python
|
Course-4-python_databases/w2_sql/assignment_count_email.py
|
Mohamed2011-bit/-Python-For-Everybody-Coursera-
|
ec9686e0d21bb4f05436187abf36a1424bfd155a
|
[
"MIT"
] | 3
|
2020-06-06T05:48:03.000Z
|
2022-01-15T23:27:13.000Z
|
Course-4-python_databases/w2_sql/assignment_count_email.py
|
Mohamed2011-bit/-Python-For-Everybody-Coursera-
|
ec9686e0d21bb4f05436187abf36a1424bfd155a
|
[
"MIT"
] | null | null | null |
Course-4-python_databases/w2_sql/assignment_count_email.py
|
Mohamed2011-bit/-Python-For-Everybody-Coursera-
|
ec9686e0d21bb4f05436187abf36a1424bfd155a
|
[
"MIT"
] | 3
|
2021-03-24T22:05:38.000Z
|
2022-03-27T13:45:30.000Z
|
# 1. Import sqlite3
# 2. Call .connect() method to create connection object
# 3. Create cursor object
# 4. Delete table if it exists
# 5. Create table with domains and counts as attributes
# 6. Request file name
# 7. Create file handle
# 8. Loop through file to retrieve domain of emails
# 9a. Retrieve data
# 9b. Call fetchone() method to query db
# 10. if/else statement similar to get()
# 11. Commit changes with commit()
# 12. Print counts
# 1. Import sqlite3
import sqlite3
# 2. Call .connect() method to create connection object
connect_db = sqlite3.connect('domain_db.sqlite')
# 3. Create cursor object
cursor_db = connect_db.cursor()
# 4. Delete table if it exists
cursor_db.execute('''
DROP TABLE IF EXISTS Counts''')
# 5. Create table with emails and counts as attributes
cursor_db.execute('''
CREATE TABLE Counts(
org TEXT,
count INTEGER)''')
# 6. Request file name
fname = input('File name: ')
# 7. Create file handle
fhandle = open(fname)
# 8. Loop through file to retrieve domain of emails
for line in fhandle:
if not line.startswith('From: '):
continue
line = line.split()
email = line[1]
email = email.split('@')
org = email[1]
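# e.g. a line like "From: someone@example.com" (hypothetical address) gives
# email = ["someone", "example.com"] and org = "example.com"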
# Using cursor as iterator
# 9a. Retrieve data
cursor_db.execute('SELECT count FROM Counts WHERE org = ? ', (org,))
# 9b. Call fetchone() method to query db
row = cursor_db.fetchone()
# 10. if/else statement similar to get()
if row is None:
cursor_db.execute('''INSERT INTO Counts (org, count)
VALUES (?, 1)''', (org, ))
else:
cursor_db.execute('''UPDATE Counts
SET count = count + 1
WHERE org = ?''', (org, ))
# 11. Commit changes with commit()
connect_db.commit()
# 12. Print counts
sqlstr = '''SELECT org, count FROM Counts ORDER BY count DESC'''
for row in cursor_db.execute(sqlstr):
print (row[0], row[1])
# 13. Close cursor (communication)
cursor_db.close()
| 26.56
| 72
| 0.6501
|
23a25b1b4ab45b7a65a7502a1ad006fed053743f
| 1,954
|
py
|
Python
|
examples/dbpedia/dbpedia/writers.py
|
aitrek/quepy3
|
977452585bd04765a1e3d30d3d354b73d4a261cf
|
[
"BSD-3-Clause"
] | 1
|
2022-03-20T06:37:30.000Z
|
2022-03-20T06:37:30.000Z
|
examples/dbpedia/dbpedia/writers.py
|
aitrek/quepy3
|
977452585bd04765a1e3d30d3d354b73d4a261cf
|
[
"BSD-3-Clause"
] | null | null | null |
examples/dbpedia/dbpedia/writers.py
|
aitrek/quepy3
|
977452585bd04765a1e3d30d3d354b73d4a261cf
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2012, Machinalis S.R.L.
# This file is part of quepy and is distributed under the Modified BSD License.
# You should have received a copy of license in the LICENSE file.
#
# Authors: Rafael Carrascosa <rcarrascosa@machinalis.com>
# Gonzalo Garcia Berrotaran <ggarcia@machinalis.com>
"""
Writers related regex.
"""
from refo import Plus, Question
from quepy3.dsl import HasKeyword
from quepy3.parsing import Lemma, Lemmas, Pos, QuestionTemplate, Particle
from dbpedia.dsl import IsBook, HasAuthor, AuthorOf, IsPerson, NameOf
nouns = Pos("DT") | Pos("IN") | Pos("NN") | Pos("NNS") | Pos("NNP") | Pos("NNPS")
class Book(Particle):
regex = Plus(nouns)
def interpret(self, match):
name = match.words.tokens
return IsBook() + HasKeyword(name)
class Author(Particle):
regex = Plus(nouns | Lemma("."))
def interpret(self, match):
name = match.words.tokens
return IsPerson() + HasKeyword(name)
class WhoWroteQuestion(QuestionTemplate):
"""
Ex: "who wrote The Little Prince?"
"who is the author of A Game Of Thrones?"
"""
regex = ((Lemmas("who write") + Book()) |
(Question(Lemmas("who be") + Pos("DT")) +
Lemma("author") + Pos("IN") + Book())) + \
Question(Pos("."))
def interpret(self, match):
author = NameOf(IsPerson() + AuthorOf(match.book))
return author, "literal"
class BooksByAuthorQuestion(QuestionTemplate):
"""
Ex: "list books by George Orwell"
"which books did Suzanne Collins wrote?"
"""
regex = (Question(Lemma("list")) + Lemmas("book by") + Author()) | \
((Lemma("which") | Lemma("what")) + Lemmas("book do") +
Author() + Lemma("write") + Question(Pos(".")))
def interpret(self, match):
book = IsBook() + HasAuthor(match.author)
book_name = NameOf(book)
return book_name, "enum"
| 27.914286
| 81
| 0.619243
|
6a8d06a1e6f5271c87808a3e6055cae6ca564cf7
| 3,932
|
py
|
Python
|
mysql_insert_assignment(1).py
|
Lucy815-bit/Insert-values-in-Mysqldb-using-python
|
c83301f5f0c7e1d868f2e1b01cadad6d53cff5c0
|
[
"Apache-2.0"
] | null | null | null |
mysql_insert_assignment(1).py
|
Lucy815-bit/Insert-values-in-Mysqldb-using-python
|
c83301f5f0c7e1d868f2e1b01cadad6d53cff5c0
|
[
"Apache-2.0"
] | null | null | null |
mysql_insert_assignment(1).py
|
Lucy815-bit/Insert-values-in-Mysqldb-using-python
|
c83301f5f0c7e1d868f2e1b01cadad6d53cff5c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import mysql.connector as mysql
db = mysql.connect(
host = "localhost",
user = "root",
passwd = "Enter your password here"
)
print(db)
# In[2]:
import mysql.connector as mysql
db = mysql.connect(
host = "localhost",
user = "root",
passwd = "Enter your password here"
)
## creating an instance of the 'cursor' class, which is used to execute 'SQL' statements in 'Python'
cursor = db.cursor()
## creating a database called 'Mydatabase'
## the 'execute()' method is used to compile a 'SQL' statement
## the statement below creates the 'Mydatabase' database
cursor.execute("CREATE DATABASE Mydatabase")
# In[20]:
import mysql.connector as mysql
db = mysql.connect(
host = "localhost",
user = "root",
passwd = "Enter yourpassword here",
database = "Mydatabase"
)
cursor = db.cursor()
## creating a table called 'customer' in the 'Mydatabase' database
cursor.execute("CREATE TABLE customer (firstname VARCHAR(255), lastname VARCHAR(255), address VARCHAR(255), email VARCHAR(255), phone VARCHAR(255), city VARCHAR(255), state VARCHAR(255), country VARCHAR(255))")
# In[ ]:
# In[27]:
import mysql.connector
db_connection = mysql.connector.connect(
host="localhost",
user="root",
passwd="Enter your password here",
database="Mydatabase"
)
my_database = db_connection.cursor()
sql = "INSERT INTO customer(firstname,lastname, address, email, phone,city,state,country) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)"
values = [
('Peter', 'Mwangi', 'Lowstreet 4','perterm@gmail.com','817-707-8732','Abuja','Equatorial','Nigeria'),
('Amy', 'Wanja', 'Apple st 652','Amy-Wanja@yahoo.com','817-187-8732','Dallas','Texas','USA'),
('Hannah', 'Hannadez', 'Mountain 21','HannahHannadez@ymail.com','817-787-4732','Miami','Florida','USA'),
('Michael', 'Onyango', 'Valley 345','perterm@gmail.com','847-787-8432','Manchester City','Manchester','United Kingdom'),
('Sandy', 'Beach', 'Ocean blvd 2','SandyBeach@gmail.com','847-757-8732','Ocala','Florida','USA'),
('Betty', 'Oprah','Green Grass 1','perterm@gmail.com','827-787-8732','Chicago','Illinois','USA'),
('Richard', 'Galloh', 'Sky st 331','perterm@gmail.com','812-787-0032','New York','New York','USA'),
('Susan', 'Griffin', 'Oneway 98','perterm@aol.com','817-787-8700','NAirobi','Nairobi','Kenya'),
('Vicky', 'Weja', 'Yellow Garden 2','Vicky.Weja@gmail.com','817-717-2332','Fort Bend','Indiana','USA'),
('Ben', 'Oguttu', 'Park Lane 38','BenOguttu@aol.com','807-727-8732','Plantation','Alabama','USA'),
('William', 'Ndola', 'Central st 954','WilliamNdola@gmail.com','810-787-8733','Houston','Texas','USA'),
('Chuck', 'Norris', 'Main Road 989','ChuckNorris@gmail.com','817-787-8734','Waco','Texas','USA'),
('Viola', 'Davis', 'Sideway 1633','Violad@gmail.com','817-787-8732','Los Angeles','California','USA')
]
my_database.executemany(sql,values)
db_connection.commit()
print(my_database.rowcount, "records were inserted successfully.")
# In[1]:
import mysql.connector as mysql
db = mysql.connect(
host = "localhost",
user = "root",
passwd = "Enter your password here",
database = "mydatabase"
)
cursor = db.cursor()
## adding a 'customerID' column to the 'customer' table
## the 'FIRST' keyword in the statement adds the column at the start of the table
cursor.execute("ALTER TABLE customer ADD COLUMN customerID INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST")
cursor.execute("DESC customer")
print(cursor.fetchall())
# In[30]:
# Insert a new field, call it customer ID, in the customer table you created.
# Run this update Python program
# After running it, display and share a screenshot with us in the forum
# In[ ]:
# In[ ]:
| 28.28777
| 210
| 0.676246
|
2d5c87a1ab26cbbd28250a9f880072e42d5ce64d
| 932
|
py
|
Python
|
src/jfk/input_code.py
|
LeHack/Lex-Yacc-PLY
|
9e268c805107600a085e1d42bb445816f80d238e
|
[
"Beerware"
] | 14
|
2017-06-22T19:32:26.000Z
|
2021-07-11T21:09:29.000Z
|
src/jfk/input_code.py
|
LeHack/Lex-Yacc-PLY
|
9e268c805107600a085e1d42bb445816f80d238e
|
[
"Beerware"
] | null | null | null |
src/jfk/input_code.py
|
LeHack/Lex-Yacc-PLY
|
9e268c805107600a085e1d42bb445816f80d238e
|
[
"Beerware"
] | 6
|
2017-11-17T19:49:39.000Z
|
2020-06-07T22:30:35.000Z
|
# math expressions
x = 2 ** 8 + (-1 - 6) * 8
# variables
y = 2
x = x + 5 * y
# logical expressions
t1 = x < 5
t2 = (x >= 200 and True)
# printing and string support
print(x + 5)
print('x =', x)
print('x + 5 =', x + 5)
print('x % 100 =', x % 100)
print("x == 205 is", x == 205, '; x != 210 is', x != 210, "; x < 5 is", t1)
z = "Var-test"
print("Test", 'def', 1, t1, t2, z)
# conditional printing
if t1:
print(t1)
if x > 10:
print("Here you see x")
if x < 10:
print("Here you don't")
if x == 10 or x > 100:
print("And here you see it again")
if x == 10 and x > 100:
print("And here you don't see it again")
# postfix conditional
x = 15 if x > 200 else 200
print("x is LE 15") if x <= 15 else print("or not")
# loop
for i in range(1, 5): print("i =", i * 2)
j = 0
for i in range(0, 15): j = j + 1
print("j =", j)
# nested loop
k = 0
for i in range(0, 5):
for j in range(0, 5): k = k + 1
print("k =", k)
| 19.020408
| 75
| 0.537554
|
fe4d3fc29f94aa6b87c17db89caf18c4d1069d34
| 5,734
|
py
|
Python
|
lightly/cli/train_cli.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | null | null | null |
lightly/cli/train_cli.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | null | null | null |
lightly/cli/train_cli.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""**Lightly Train:** Train a self-supervised model from the command-line.
This module contains the entrypoint for the **lightly-train**
command-line interface.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
import hydra
import torch
import torch.nn as nn
import warnings
from torch.utils.hipify.hipify_python import bcolors
from lightly.cli._cli_simclr import _SimCLR
from lightly.data import ImageCollateFunction
from lightly.data import LightlyDataset
from lightly.embedding import SelfSupervisedEmbedding
from lightly.loss import NTXentLoss
from lightly.models import ResNetGenerator
from lightly.models.batchnorm import get_norm_layer
from lightly.cli._helpers import is_url
from lightly.cli._helpers import get_ptmodel_from_config
from lightly.cli._helpers import fix_input_path
from lightly.cli._helpers import load_state_dict_from_url
from lightly.cli._helpers import load_from_state_dict
from lightly.cli._helpers import cpu_count
def _train_cli(cfg, is_cli_call=True):
input_dir = cfg['input_dir']
if input_dir and is_cli_call:
input_dir = fix_input_path(input_dir)
if 'seed' in cfg.keys():
seed = cfg['seed']
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if cfg["trainer"]["weights_summary"] == "None":
cfg["trainer"]["weights_summary"] = None
if torch.cuda.is_available():
device = 'cuda'
elif cfg['trainer'] and cfg['trainer']['gpus']:
device = 'cpu'
cfg['trainer']['gpus'] = 0
else:
device = 'cpu'
if cfg['loader']['batch_size'] < 64:
msg = 'Training a self-supervised model with a small batch size: {}! '
msg = msg.format(cfg['loader']['batch_size'])
msg += 'Small batch size may harm embedding quality. '
msg += 'You can specify the batch size via the loader key-word: '
msg += 'loader.batch_size=BSZ'
warnings.warn(msg)
# determine the number of available cores
if cfg['loader']['num_workers'] < 0:
cfg['loader']['num_workers'] = cpu_count()
state_dict = None
checkpoint = cfg['checkpoint']
if cfg['pre_trained'] and not checkpoint:
# if checkpoint wasn't specified explicitly and pre_trained is True
# try to load the checkpoint from the model zoo
checkpoint, key = get_ptmodel_from_config(cfg['model'])
if not checkpoint:
msg = 'Cannot download checkpoint for key {} '.format(key)
msg += 'because it does not exist! '
msg += 'Model will be trained from scratch.'
warnings.warn(msg)
elif checkpoint:
checkpoint = fix_input_path(checkpoint) if is_cli_call else checkpoint
if checkpoint:
# load the PyTorch state dictionary and map it to the current device
if is_url(checkpoint):
state_dict = load_state_dict_from_url(
checkpoint, map_location=device
)['state_dict']
else:
state_dict = torch.load(
checkpoint, map_location=device
)['state_dict']
# load model
resnet = ResNetGenerator(cfg['model']['name'], cfg['model']['width'])
last_conv_channels = list(resnet.children())[-1].in_features
features = nn.Sequential(
get_norm_layer(3, 0),
*list(resnet.children())[:-1],
nn.Conv2d(last_conv_channels, cfg['model']['num_ftrs'], 1),
nn.AdaptiveAvgPool2d(1),
)
model = _SimCLR(
features,
num_ftrs=cfg['model']['num_ftrs'],
out_dim=cfg['model']['out_dim']
)
if state_dict is not None:
load_from_state_dict(model, state_dict)
criterion = NTXentLoss(**cfg['criterion'])
optimizer = torch.optim.SGD(model.parameters(), **cfg['optimizer'])
dataset = LightlyDataset(input_dir)
cfg['loader']['batch_size'] = min(
cfg['loader']['batch_size'],
len(dataset)
)
collate_fn = ImageCollateFunction(**cfg['collate'])
dataloader = torch.utils.data.DataLoader(dataset,
**cfg['loader'],
collate_fn=collate_fn)
encoder = SelfSupervisedEmbedding(model, criterion, optimizer, dataloader)
encoder.init_checkpoint_callback(**cfg['checkpoint_callback'])
encoder.train_embedding(**cfg['trainer'])
print(f'Best model is stored at: {bcolors.OKBLUE}{encoder.checkpoint}{bcolors.ENDC}')
os.environ[
cfg['environment_variable_names']['lightly_last_checkpoint_path']
] = encoder.checkpoint
return encoder.checkpoint
@hydra.main(config_path="config", config_name="config")
def train_cli(cfg):
"""Train a self-supervised model from the command-line.
Args:
cfg:
The default configs are loaded from the config file.
To overwrite them please see the section on the config file
(.config.config.yaml).
Command-Line Args:
input_dir:
Path to the input directory where images are stored.
Examples:
>>> # train model with default settings
>>> lightly-train input_dir=data/
>>>
>>> # train model with batches of size 128
>>> lightly-train input_dir=data/ loader.batch_size=128
>>>
>>> # train model for 10 epochs
>>> lightly-train input_dir=data/ trainer.max_epochs=10
>>>
>>> # print a full summary of the model
>>> lightly-train input_dir=data/ trainer.weights_summary=full
"""
return _train_cli(cfg)
def entry():
train_cli()
| 32.954023
| 89
| 0.648587
|
02b6c6435d96ecb7030468bfecbae53e1c0655e9
| 1,479
|
py
|
Python
|
python/5_Info_JR.py
|
sashaboulouds/statistics
|
bfda7a83353be6f3ebc2e50ca35050829b11c745
|
[
"MIT"
] | null | null | null |
python/5_Info_JR.py
|
sashaboulouds/statistics
|
bfda7a83353be6f3ebc2e50ca35050829b11c745
|
[
"MIT"
] | null | null | null |
python/5_Info_JR.py
|
sashaboulouds/statistics
|
bfda7a83353be6f3ebc2e50ca35050829b11c745
|
[
"MIT"
] | null | null | null |
## import numpy, panda, statsmodels
import pandas as pd
import numpy as np
import statsmodels.api as sm
import scipy.stats
import matplotlib as mpl
mpl.use('TkAgg') # backend adjustment
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from math import sqrt
import random
import seaborn as sns
def Info(vy):
# considering type(vy) = pd.dataframe
vy_array = vy.values.ravel()
T = len(vy)
meany = vy.mean()
mediany = vy.median()
stddevy = vy.std()
skewnessy = (((vy - meany)**3)/(stddevy**3)).mean()
kurtosisy = (((vy - meany)**4)/(stddevy**4)).mean()
    print "Average: " + str(meany)
    print "Median: " + str(mediany)
    print "Standard deviation: " + str(stddevy)
    print "Skewness: " + str(skewnessy)
    print "Kurtosis: " + str(kurtosisy)
    print "Percentile (0.01): " + str(vy.quantile(0.01))
    print "Percentile (0.25): " + str(vy.quantile(0.25))
    print "Percentile (0.75): " + str(vy.quantile(0.75))
    print "Percentile (0.99): " + str(vy.quantile(0.99))
plt.plot(vy)
plt.savefig('info_1.png')
plt.clf()
plt.hist(vy_array, bins=50, density=True)
sns.kdeplot(vy_array, color='red', cut=True)
plt.plot(vy_array, scipy.stats.norm.pdf(vy_array, meany, stddevy))
plt.title("Histogram of returns")
plt.xlabel("returns")
plt.ylabel("frequency")
plt.savefig('info_2.png')
plt.clf()
plt.boxplot(vy_array)
plt.savefig('info_3.png')
plt.clf()
sm.qqplot(vy_array)
plt.savefig('info_4.png')
plt.clf()
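# Illustrative usage sketch (not part of the original script): the synthetic
# returns below are an assumption standing in for real data.
if __name__ == '__main__':
    vy_demo = pd.DataFrame({'ret': np.random.normal(0, 0.01, 1000)})
    Info(vy_demo)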
| 26.890909
| 70
| 0.645707
|
649eac88304e80117335adf3ebf6a4f3615a8d43
| 1,350
|
py
|
Python
|
components/driver/test_apps/gptimer/app_test.py
|
guvvei/esp-idf
|
dae8bdb60bed819094137a52c2f86a1e49195a34
|
[
"Apache-2.0"
] | 2
|
2021-12-23T23:58:32.000Z
|
2022-03-19T03:42:31.000Z
|
components/driver/test_apps/gptimer/app_test.py
|
guvvei/esp-idf
|
dae8bdb60bed819094137a52c2f86a1e49195a34
|
[
"Apache-2.0"
] | null | null | null |
components/driver/test_apps/gptimer/app_test.py
|
guvvei/esp-idf
|
dae8bdb60bed819094137a52c2f86a1e49195a34
|
[
"Apache-2.0"
] | 3
|
2021-08-07T09:17:31.000Z
|
2022-03-20T21:54:52.000Z
|
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import glob
import os
import ttfw_idf
from tiny_test_fw import Utility
@ttfw_idf.idf_component_unit_test(env_tag='COMPONENT_UT_GENERIC', target=['esp32', 'esp32s2', 'esp32s3', 'esp32c3'])
def test_component_ut_gptimer(env, _): # type: (ttfw_idf.TinyFW.Env, None) -> None
# Get the names of all configs (sdkconfig.ci.* files)
config_files = glob.glob(os.path.join(os.path.dirname(__file__), 'sdkconfig.ci.*'))
config_names = [os.path.basename(s).replace('sdkconfig.ci.', '') for s in config_files]
# Run test once with binaries built for each config
for name in config_names:
Utility.console_log(f'Checking config "{name}"... ', end='')
dut = env.get_dut('gptimer', 'components/driver/test_apps/gptimer', app_config_name=name)
dut.start_app()
stdout = dut.expect('Press ENTER to see the list of tests', full_stdout=True)
dut.write('*')
stdout = dut.expect("Enter next test, or 'enter' to see menu", full_stdout=True, timeout=30)
        ttfw_idf.ComponentUTResult.parse_result(stdout, ttfw_idf.TestFormat.UNITY_BASIC)
env.close_dut(dut.name)
Utility.console_log(f'Test config "{name}" done')
if __name__ == '__main__':
test_component_ut_gptimer()
| 43.548387
| 116
| 0.706667
|
f83ddb3c79b649b0b6771f4f548a6be2f8f895a3
| 17,110
|
py
|
Python
|
tensorflow/python/framework/struct_field.py
|
xuxin0509/tensorflow
|
e77736b0e51cb816ad239afd040557d46e1e9290
|
[
"Apache-2.0"
] | 1
|
2021-07-20T22:58:45.000Z
|
2021-07-20T22:58:45.000Z
|
tensorflow/python/framework/struct_field.py
|
dfki-thsc/tensorflow
|
8d746f768196a2434d112e98fc26c99590986d73
|
[
"Apache-2.0"
] | 2
|
2021-11-10T20:10:39.000Z
|
2022-02-10T05:15:31.000Z
|
tensorflow/python/framework/struct_field.py
|
dfki-thsc/tensorflow
|
8d746f768196a2434d112e98fc26c99590986d73
|
[
"Apache-2.0"
] | 1
|
2019-04-03T17:21:16.000Z
|
2019-04-03T17:21:16.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metadata about fields for user-defined Struct classes."""
import collections
import collections.abc
import typing
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import immutable_dict
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
# These names may not be used as the name for a Struct field (to prevent name
# clashes). All names beginning with `'_tf_struct'` are also reserved.
RESERVED_FIELD_NAMES = [
'self',
# Name of the nested TypeSpec class.
'Spec',
# Names defined by the CompositeTensor base class.
'_type_spec',
'_shape_invariant_to_type_spec',
'_consumers',
# Names defined by the TypeSpec base class.
'value_type',
'is_compatible_with',
'most_specific_compatible_type',
'_with_tensor_ranks_only',
'_to_components',
'_from_components',
'_component_specs',
'_to_tensor_list',
'_from_tensor_list',
'_from_compatible_tensor_list',
'_flat_tensor_specs',
'_serialize',
'_deserialize',
'_to_legacy_output_types',
'_to_legacy_output_shapes',
'_to_legacy_output_classes',
]
class Sentinel(object):
"""Sentinel value that's not equal (w/ `is`) to any user value."""
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name
# ==============================================================================
# StructField
# ==============================================================================
class StructField(
collections.namedtuple('StructField', ['name', 'value_type', 'default'])):
"""Metadata about a single field in a `tf.struct` object."""
NO_DEFAULT = Sentinel('StructField.NO_DEFAULT')
def __new__(cls, name, value_type, default=NO_DEFAULT):
"""Constructs a new StructField containing metadata for a single field.
Args:
name: The name of the new field (`str`). May not be a reserved name.
value_type: A python type expression constraining what values this field
can take.
default: The default value for the new field, or `NO_DEFAULT` if this
field has no default value.
Returns:
A new `StructField`.
Raises:
TypeError: If the type described by `value_type` is not currently
supported by `tf.struct`.
TypeError: If `default` is specified and its type does not match
`value_type`.
"""
try:
validate_field_value_type(value_type, allow_forward_references=True)
except TypeError as e:
raise TypeError(f'In field {name!r}: {e}')
if default is not cls.NO_DEFAULT:
default = _convert_value(default, value_type,
(f'default value for {name}',))
return super(StructField, cls).__new__(cls, name, value_type, default)
@staticmethod
def is_reserved_name(name):
"""Returns true if `name` is a reserved name."""
return name in RESERVED_FIELD_NAMES or name.lower().startswith('_tf_struct')
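# Illustrative sketch (comments only, not part of the original module): a field
# whose values must be tuples of ints, declared with a default:
#
#   field = StructField('scores', typing.Tuple[int, ...], default=(1, 2))
#   StructField.is_reserved_name('_type_spec')  # -> True (reserved)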
def validate_field_value_type(value_type,
in_mapping_key=False,
allow_forward_references=False):
"""Checks that `value_type` contains only supported type annotations.
Args:
value_type: The type annotation to check.
in_mapping_key: True if `value_type` is nested in the key of a mapping.
allow_forward_references: If false, then raise an exception if a
`value_type` contains a forward reference (i.e., a string literal).
Raises:
TypeError: If `value_type` contains an unsupported type annotation.
"""
if isinstance(value_type, str) or is_forward_ref(value_type):
if allow_forward_references:
return
else:
raise TypeError(f'Unresolved forward reference {value_type!r}')
if value_type in (int, float, str, bytes, bool, None, _NoneType,
dtypes.DType):
return
elif (value_type in (ops.Tensor, tensor_shape.TensorShape) or
isinstance(value_type, type_spec.TypeSpec) or
(isinstance(value_type, type) and
issubclass(value_type, composite_tensor.CompositeTensor))):
if in_mapping_key:
raise TypeError('Key must be hashable.')
elif is_generic_tuple(value_type) or is_generic_union(value_type):
type_args = get_generic_type_args(value_type)
if (len(type_args) == 2 and type_args[1] is Ellipsis and
is_generic_tuple(value_type)): # `Tuple[X, ...]`
validate_field_value_type(type_args[0], in_mapping_key,
allow_forward_references)
else:
for arg in get_generic_type_args(value_type):
validate_field_value_type(arg, in_mapping_key, allow_forward_references)
elif is_generic_mapping(value_type):
key_type, value_type = get_generic_type_args(value_type)
validate_field_value_type(key_type, True, allow_forward_references)
validate_field_value_type(value_type, in_mapping_key,
allow_forward_references)
elif isinstance(value_type, type):
raise TypeError(f'Unsupported type annotation `{value_type.__name__}`')
else:
raise TypeError(f'Unsupported type annotation {value_type!r}')
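# Illustrative sketch (comments only, not part of the original module): the
# validator above accepts, for example,
#   typing.Mapping[str, ops.Tensor]     # a Tensor is fine as a mapping *value*
# but rejects
#   typing.Mapping[ops.Tensor, int]     # TypeError: 'Key must be hashable.'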
# ==============================================================================
# Type-checking & conversion for StructField values
# ==============================================================================
def convert_fields(fields, field_values):
"""Type-checks and converts each field in `field_values` (in place).
Args:
fields: A list of `StructField` objects.
field_values: A `dict` mapping field names to values. Must contain an entry
for each field. I.e., `set(field_values.keys())` must be equal to
`set([f.name for f in fields])`.
Raises:
ValueError: If the keys of `field_values` do not match the names of
the fields in `fields`.
TypeError: If any value in `field_values` does not have the type indicated
by the corresponding `StructField` object.
"""
_convert_fields(fields, field_values, for_spec=False)
def convert_fields_for_spec(fields, field_values):
"""Type-checks and converts field values for a TypeSpec (in place).
This is similar to `convert_fields`, except that we expect a TypeSpec
for tensor-like types. In particular, if the `value_type` of a field
specifies a tensor-like type (tf.Tensor, CompositeTensor, or TypeSpec),
then the corresponding value in `fields` is expected to contain a TypeSpec
(rather than a value described by that TypeSpec).
Args:
fields: A list of `StructField` objects.
field_values: A `dict` mapping field names to values. Must contain an entry
for each field. I.e., `set(field_values.keys())` must be equal to
`set([f.name for f in fields])`.
Raises:
ValueError: If the keys of `field_values` do not match the names of
the fields in `fields`.
TypeError: If any value in `field_values` does not have the type indicated
by the corresponding `StructField` object.
"""
_convert_fields(fields, field_values, for_spec=True)
def _convert_fields(fields, field_values, for_spec):
"""Type-checks and converts each field in `field_values` (in place).
Args:
fields: A list of `StructField` objects.
field_values: A `dict` mapping field names to values. Must contain an entry
for each field. I.e., `set(field_values.keys())` must be equal to
`set([f.name for f in fields])`.
for_spec: If false, then expect a value for tensor-like types; if true, then
expect a TypeSpec for tensor-like types.
Raises:
ValueError: If the keys of `field_values` do not match the names of
the fields in `fields`.
TypeError: If any value in `field_values` does not have the type indicated
by the corresponding `StructField` object.
"""
converted = {}
if len(fields) != len(field_values):
_report_field_mismatches(fields, field_values)
for field in fields:
if field.name not in field_values:
_report_field_mismatches(fields, field_values)
field_value = field_values[field.name]
converted[field.name] = _convert_value(field_value, field.value_type,
(field.name,), for_spec)
field_values.update(converted)
def _convert_value(value, expected_type, path, for_spec=False):
"""Type-checks and converts a value.
Args:
value: The value to type-check.
expected_type: The expected type for the value.
path: Tuple of `str` naming the value (used for exception messages).
for_spec: If false, then expect a value for tensor-like types; if true, then
expect a TensorSpec for tensor-like types.
Returns:
A copy of `value`, converted to the expected type.
Raises:
TypeError: If `value` can not be converted to the expected type.
"""
assert isinstance(path, tuple)
if expected_type is None:
expected_type = _NoneType
if expected_type is ops.Tensor:
return _convert_tensor(value, path, for_spec)
elif isinstance(expected_type, tensor_spec.TensorSpec):
return _convert_tensor_spec(value, expected_type, path, for_spec)
elif isinstance(expected_type, type_spec.TypeSpec):
return _convert_type_spec(value, expected_type, path, for_spec)
elif (isinstance(expected_type, type) and
issubclass(expected_type, composite_tensor.CompositeTensor)):
return _convert_composite_tensor(value, expected_type, path, for_spec)
elif expected_type in (int, float, bool, str, bytes, _NoneType, dtypes.DType,
tensor_shape.TensorShape):
if not isinstance(value, expected_type):
raise TypeError(f'{"".join(path)}: expected '
f'{expected_type.__name__}, got {value!r}')
return value
elif is_generic_tuple(expected_type):
return _convert_tuple(value, expected_type, path, for_spec)
elif is_generic_mapping(expected_type):
return _convert_mapping(value, expected_type, path, for_spec)
elif is_generic_union(expected_type):
return _convert_union(value, expected_type, path, for_spec)
else:
raise TypeError(f'{"".join(path)}: Unsupported type annotation '
f'{expected_type!r}')
def _convert_tensor(value, path, for_spec):
"""Converts `value` to a `Tensor`."""
if for_spec:
if not isinstance(value, tensor_spec.TensorSpec):
raise TypeError(f'{"".join(path)}: expected a TensorSpec, got {value!r}')
return value
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value)
except (ValueError, TypeError) as e:
raise TypeError(f'{"".join(path)}: expected a Tensor, '
f'got {value!r}') from e
return value
def _convert_tensor_spec(value, expected_type, path, for_spec):
  """Converts `value` to a Tensor compatible with TensorSpec expected_type."""
if for_spec:
if not (isinstance(value, tensor_spec.TensorSpec) and
expected_type.is_compatible_with(value)):
raise TypeError(f'{"".join(path)}: expected a TensorSpec compatible '
f'with {expected_type}, got {value!r}')
return value
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value, expected_type.dtype)
except (ValueError, TypeError):
value = None
if value is None or not expected_type.is_compatible_with(value):
raise TypeError(f'{"".join(path)}: expected a Tensor compatible with '
f'{expected_type}, got {value!r}')
return value
def _convert_type_spec(value, expected_type, path, for_spec):
  """Converts `value` to a value compatible with TypeSpec `expected_type`."""
if for_spec:
if not (isinstance(value, type_spec.TypeSpec) and
expected_type.is_compatible_with(value)):
raise TypeError(f'{"".join(path)}: expected a TypeSpec compatible '
f'with {expected_type}, got {value!r}')
return value
if (isinstance(value, type_spec.TypeSpec) or
not expected_type.is_compatible_with(value)):
raise TypeError(f'{"".join(path)}: expected {expected_type!r}, '
f'got {value!r}')
return value
def _convert_composite_tensor(value, expected_type, path, for_spec):
"""Converts `value` to a value of type `expected_type`."""
if for_spec:
if not (isinstance(value, type_spec.TypeSpec) and
issubclass(value.value_type, expected_type)):
raise TypeError(f'{"".join(path)}: expected a TypeSpec for '
f'{expected_type.__name__}, got {value!r}')
return value
if not isinstance(value, expected_type):
raise TypeError(f'{"".join(path)}: expected {expected_type.__name__}, '
f'got {value!r}')
return value
def _convert_tuple(value, expected_type, path, for_spec):
"""Converts `value` to a tuple with type `expected_type`."""
if not isinstance(value, typing.Sequence):
raise TypeError(f'{"".join(path)}: expected tuple, got {value!r}')
element_types = get_generic_type_args(expected_type)
if len(element_types) == 2 and element_types[1] is Ellipsis:
return tuple([
_convert_value(v, element_types[0], path + (f'[{i}]',), for_spec)
for (i, v) in enumerate(value)
])
else:
if len(value) != len(element_types):
raise TypeError(f'{"".join(path)}: expected tuple with length '
                      f'{len(element_types)}, got {value!r}')
return tuple([
_convert_value(v, t, path + (f'[{i}]',), for_spec)
for (i, (v, t)) in enumerate(zip(value, element_types))
])
def _convert_mapping(value, expected_type, path, for_spec):
"""Converts `value` to a mapping with type `expected_type`."""
if not isinstance(value, typing.Mapping):
raise TypeError(f'{"".join(path)}: expected mapping, got {value!r}')
key_type, value_type = get_generic_type_args(expected_type)
return immutable_dict.ImmutableDict([
(_convert_value(k, key_type, path + ('[<key>]',), for_spec),
_convert_value(v, value_type, path + (f'[{k!r}]',), for_spec))
for (k, v) in value.items()
])
def _convert_union(value, expected_type, path, for_spec):
"""Converts `value` to a value with any of the types in `expected_type`."""
for type_option in get_generic_type_args(expected_type):
try:
return _convert_value(value, type_option, path, for_spec)
except TypeError:
pass
raise TypeError(f'{"".join(path)}: expected {expected_type}, got {value!r}')
def _report_field_mismatches(fields, field_values):
"""Raises an exception with mismatches between fields and field_values."""
expected = set(f.name for f in fields)
actual = set(field_values)
extra = actual - expected
if extra:
raise ValueError(f'Got unexpected fields: {extra}')
missing = expected - actual
if missing:
raise ValueError(f'Missing required fields: {missing}')
# ==============================================================================
# Utilities for accessing Python generic type annotations (typing.*)
# ==============================================================================
def is_generic_union(tp):
"""Returns true if `tp` is a parameterized typing.Union value."""
return (tp is not typing.Union and
getattr(tp, '__origin__', None) is typing.Union)
def is_generic_tuple(tp):
"""Returns true if `tp` is a parameterized typing.Tuple value."""
return (tp not in (tuple, typing.Tuple) and
getattr(tp, '__origin__', None) in (tuple, typing.Tuple))
def is_generic_mapping(tp):
"""Returns true if `tp` is a parameterized typing.Mapping value."""
return (tp not in (collections.abc.Mapping, typing.Mapping) and getattr(
tp, '__origin__', None) in (collections.abc.Mapping, typing.Mapping))
def is_forward_ref(tp):
"""Returns true if `tp` is a typing forward reference."""
if hasattr(typing, 'ForwardRef'):
return isinstance(tp, typing.ForwardRef)
elif hasattr(typing, '_ForwardRef'):
return isinstance(tp, typing._ForwardRef) # pylint: disable=protected-access
else:
return False
# Note: typing.get_args was added in Python 3.8.
if hasattr(typing, 'get_args'):
get_generic_type_args = typing.get_args
else:
get_generic_type_args = lambda tp: tp.__args__
_NoneType = type(None)
| 38.536036
| 81
| 0.675511
|
cfedb9613e4db5a4661068473d953686b1870962
| 905
|
py
|
Python
|
product/migrations/0001_initial.py
|
rodkiewicz/pola-backend
|
e26df1cea07b43c8b4272739234b7e78e2ce08c9
|
[
"BSD-3-Clause"
] | 30
|
2015-08-13T01:05:36.000Z
|
2022-01-22T03:02:50.000Z
|
product/migrations/0001_initial.py
|
rodkiewicz/pola-backend
|
e26df1cea07b43c8b4272739234b7e78e2ce08c9
|
[
"BSD-3-Clause"
] | 1,428
|
2015-10-08T07:38:26.000Z
|
2022-03-31T08:36:08.000Z
|
product/migrations/0001_initial.py
|
rodkiewicz/pola-backend
|
e26df1cea07b43c8b4272739234b7e78e2ce08c9
|
[
"BSD-3-Clause"
] | 13
|
2015-12-27T22:35:25.000Z
|
2022-02-01T15:55:58.000Z
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '__first__'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
(
'id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True),
),
('name', models.CharField(max_length=255, null=True)),
('code', models.CharField(max_length=20, db_index=True)),
(
'company',
models.ForeignKey(blank=True, to='company.Company', null=True, on_delete=models.CASCADE),
),
],
options={
'verbose_name': 'Product',
'verbose_name_plural': 'Products',
},
),
]
| 29.193548
| 110
| 0.478453
|
48c4c0aff0a2a80352b46d9406a2c516976582f8
| 2,601
|
py
|
Python
|
app.py
|
jorgemuriel/takseebot
|
c3529def6c32bdf7d9f948374ff3aba634d5b8f7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
jorgemuriel/takseebot
|
c3529def6c32bdf7d9f948374ff3aba634d5b8f7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
jorgemuriel/takseebot
|
c3529def6c32bdf7d9f948374ff3aba634d5b8f7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "yahooWeatherForecast":
return {
"speech": "Esto es una prueba del servidor",
"displayText": "Esto es una prueba del servidor",
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = makeYqlQuery(req)
if yql_query is None:
return {}
yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
result = urllib.urlopen(yql_url).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
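# For example, when the 'geo-city' parameter is "Madrid", the helper above
# returns (shown here split for readability):
#   select * from weather.forecast where woeid in
#     (select woeid from geo.places(1) where text='Madrid')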
def makeWebhookResult(data):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
    speech = "Hoy la temperatura en Madrid es de " + condition.get('temp') + " " + units.get('temperature')
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=False, port=port, host='0.0.0.0')
| 24.537736
| 118
| 0.608997
|
e757098295632284265a6be6a9d70e88db50edaa
| 1,105
|
py
|
Python
|
read_wkshader.py
|
entropia/Warpkern-Software
|
ffd8849b0de6d8e980914b5b1436269ad3977b3f
|
[
"MIT"
] | null | null | null |
read_wkshader.py
|
entropia/Warpkern-Software
|
ffd8849b0de6d8e980914b5b1436269ad3977b3f
|
[
"MIT"
] | null | null | null |
read_wkshader.py
|
entropia/Warpkern-Software
|
ffd8849b0de6d8e980914b5b1436269ad3977b3f
|
[
"MIT"
] | null | null | null |
import sys
import base64
from PIL import Image
from urllib.parse import urlparse
import json
import io
from itertools import chain, cycle
import numpy as np
from warpkern import PrerenderedAnim
def load_image_from_datauri(datauri: str):
path = urlparse(datauri).path
data = base64.b64decode(path.split(",")[-1])
im = Image.open(io.BytesIO(data))
if im.size != (191, 12):
raise RuntimeError("invalid dimensions: " + str(im.size))
return im.convert("RGBA")
def load_anim(f):
data = json.load(f)
images = (load_image_from_datauri(i) for i in data["preview"])
for i in images:
yield np.array(list(chain((0, 0, 0, 0), chain.from_iterable((0xFF, b, g, r) for r, g, b, a in i.getdata()))), np.uint8)
class ShaderAnim(PrerenderedAnim):
def __init__(self, filename):
with open(filename) as f:
arrays = list(load_anim(f))
self.looper = cycle(arrays)
def tick(self, time, dt):
return next(self.looper)
def generate_animations(filenames):
return [ShaderAnim(shaderfile)
for shaderfile in filenames]
| 26.309524
| 127
| 0.669683
|
25d44e5021e719e87c2af9e29b3a856c61376222
| 76,639
|
py
|
Python
|
nova/compute/manager.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | 1
|
2021-11-08T10:11:44.000Z
|
2021-11-08T10:11:44.000Z
|
nova/compute/manager.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | null | null | null |
nova/compute/manager.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
**Related Flags**
:instances_path: Where instances are kept on disk
:compute_driver: Name of class that is used to handle virtualization, loaded
by :func:`nova.utils.import_object`
:volume_manager: Name of class that handles persistent storage, loaded by
:func:`nova.utils.import_object`
"""
import os
import socket
import sys
import tempfile
import time
import functools
from eventlet import greenthread
import nova.context
from nova import block_device
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova import manager
from nova import network
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
flags.DEFINE_integer('password_length', 12,
'Length of generated admin passwords')
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
flags.DEFINE_integer('live_migration_retry_count', 30,
"Retry count needed in live_migration."
" sleep 1 sec for each count")
flags.DEFINE_integer("rescue_timeout", 0,
"Automatically unrescue an instance after N seconds."
" Set to 0 to disable.")
flags.DEFINE_integer('host_state_interval', 120,
'Interval in seconds for querying the host status')
LOG = logging.getLogger('nova.compute.manager')
def publisher_id(host=None):
return notifier.publisher_id("compute", host)
def checks_instance_lock(function):
"""Decorator to prevent action against locked instances for non-admins."""
@functools.wraps(function)
def decorated_function(self, context, instance_id, *args, **kwargs):
#TODO(anyone): this being called instance_id is forcing a slightly
# confusing convention of pushing instance_uuids
# through an "instance_id" key in the queue args dict when
# casting through the compute API
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
" |%(instance_id)s|") % locals(), context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
context=context)
LOG.info(_("check_instance_lock: admin: |%s|"), admin,
context=context)
# if admin or unlocked call function otherwise log error
if admin or not locked:
LOG.info(_("check_instance_lock: executing: |%s|"), function,
context=context)
function(self, context, instance_id, *args, **kwargs)
else:
LOG.error(_("check_instance_lock: not executing |%s|"),
function, context=context)
return False
return decorated_function
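# Illustrative note (comments only, not part of the original module): the
# decorator above wraps the RPC-facing methods defined further down, e.g.
#
#   @checks_instance_lock
#   def reboot_instance(self, context, instance_id):
#       ...
#
# so actions against a locked instance only run for admin contexts.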
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
# and redocument the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
try:
self.driver = utils.check_isinstance(
utils.import_object(compute_driver),
driver.ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
self.network_api = network.API()
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
def _instance_update(self, context, instance_id, **kwargs):
"""Update an instance in the database using kwargs as value."""
return self.db.instance_update(context, instance_id, kwargs)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
inst_name = instance['name']
db_state = instance['power_state']
drv_state = self._get_power_state(context, instance)
expect_running = db_state == power_state.RUNNING \
and drv_state != db_state
LOG.debug(_('Current state of %(inst_name)s is %(drv_state)s, '
'state in DB is %(db_state)s.'), locals())
if (expect_running and FLAGS.resume_guests_state_on_host_boot)\
or FLAGS.start_guests_on_host_boot:
LOG.info(_('Rebooting instance %(inst_name)s after '
'nova-compute restart.'), locals())
self.reboot_instance(context, instance['id'])
elif drv_state == power_state.RUNNING:
                # Hyper-V and VMWareAPI drivers will raise an exception
try:
net_info = self._get_instance_nw_info(context, instance)
self.driver.ensure_filtering_rules_for_instance(instance,
net_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state of %s'), instance['name'])
try:
return self.driver.get_info(instance['name'])["state"]
except exception.NotFound:
return power_state.FAILED
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
#TODO(mdragon): perhaps make this variable by console_type?
return self.db.queue_get_for(context,
FLAGS.console_topic,
FLAGS.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_rules(self, context, security_group_id,
**kwargs):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_members(self, context,
security_group_id, **kwargs):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance.
Returns an empty list if stub_network flag is set."""
network_info = []
if not FLAGS.stub_network:
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
volume_api = volume.API()
block_device_mapping = []
swap = None
ephemerals = []
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance_id):
LOG.debug(_("setting up bdm %s"), bdm)
if bdm['no_device']:
continue
if bdm['virtual_name']:
virtual_name = bdm['virtual_name']
device_name = bdm['device_name']
assert block_device.is_swap_or_ephemeral(virtual_name)
if virtual_name == 'swap':
swap = {'device_name': device_name,
'swap_size': bdm['volume_size']}
elif block_device.is_ephemeral(virtual_name):
eph = {'num': block_device.ephemeral_num(virtual_name),
'virtual_name': virtual_name,
'device_name': device_name,
'size': bdm['volume_size']}
ephemerals.append(eph)
continue
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
vol = volume_api.create(context, bdm['volume_size'],
bdm['snapshot_id'], '', '')
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
volume_api.wait_creation(context, vol['id'])
self.db.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
if not ((bdm['snapshot_id'] is None) or
(bdm['volume_id'] is not None)):
                LOG.error(_('corrupted state of block device mapping '
                            'id: %(id)s '
                            'snapshot: %(snapshot_id)s volume: %(volume_id)s') %
                          {'id': bdm['id'],
                           'snapshot_id': bdm['snapshot_id'],
                           'volume_id': bdm['volume_id']})
raise exception.ApiError(_('broken block device mapping %d') %
bdm['id'])
if bdm['volume_id'] is not None:
volume_api.check_attach(context,
volume_id=bdm['volume_id'])
dev_path = self._attach_volume_boot(context, instance_id,
bdm['volume_id'],
bdm['device_name'])
block_device_mapping.append({'device_path': dev_path,
'mount_device':
bdm['device_name']})
return (swap, ephemerals, block_device_mapping)
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
def _check_image_size():
"""Ensure image is smaller than the maximum size allowed by the
instance_type.
The image stored in Glance is potentially compressed, so we use two
checks to ensure that the size isn't exceeded:
1) This one - checks compressed size, this a quick check to
eliminate any images which are obviously too large
2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
is a slower check since it requires uncompressing the entire
image, but is accurate because it reflects the image's
actual size.
"""
# NOTE(jk0): image_ref is defined in the DB model, image_href is
# used by the image service. This should be refactored to be
# consistent.
image_href = instance['image_ref']
image_service, image_id = nova.image.get_image_service(context,
image_href)
image_meta = image_service.show(context, image_id)
try:
size_bytes = image_meta['size']
except KeyError:
# Size is not a required field in the image service (yet), so
# we are unable to rely on it being there even though it's in
# glance.
# TODO(jk0): Should size be required in the image service?
return
instance_type_id = instance['instance_type_id']
instance_type = self.db.instance_type_get(context,
instance_type_id)
allowed_size_gb = instance_type['local_gb']
# NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
# need to handle potential situations where local_gb is 0. This is
# the default for m1.tiny.
if allowed_size_gb == 0:
return
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
"%(size_bytes)d, allowed_size_bytes="
"%(allowed_size_bytes)d") % locals())
if size_bytes > allowed_size_bytes:
LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
" instance_type allowed size "
"%(allowed_size_bytes)d")
% locals())
raise exception.ImageTooLarge()
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
requested_networks = kwargs.get('requested_networks', None)
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
_check_image_size()
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
updates = {}
updates['host'] = self.host
updates['launched_on'] = self.host
updates['vm_state'] = vm_states.BUILDING
updates['task_state'] = task_states.NETWORKING
instance = self.db.instance_update(context, instance_id, updates)
instance['injected_files'] = kwargs.get('injected_files', [])
instance['admin_pass'] = kwargs.get('admin_password', None)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
# will eventually also need to save the address here.
if not FLAGS.stub_network:
network_info = self.network_api.allocate_for_instance(context,
instance, vpn=is_vpn,
requested_networks=requested_networks)
LOG.debug(_("instance network_info: |%s|"), network_info)
else:
# TODO(tr3buchet) not really sure how this should be handled.
# virt requires network_info to be passed in but stub_network
# is enabled. Setting to [] for now will cause virt to skip
# all vif creation and network injection, maybe this is correct
network_info = []
self._instance_update(context,
instance_id,
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
(swap, ephemerals,
block_device_mapping) = self._setup_block_device_mapping(
context, instance_id)
block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
self._instance_update(context,
instance_id,
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING)
# TODO(vish) check to make sure the availability zone matches
try:
self.driver.spawn(context, instance,
network_info, block_device_info)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
return
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=utils.utcnow())
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.create',
notifier.INFO, usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
# deleted before it actually got created. This should
# be fixed once we have no-db-messaging
pass
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def run_instance(self, context, instance_id, **kwargs):
self._run_instance(context, instance_id, **kwargs)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def start_instance(self, context, instance_id):
"""Starting an instance on this host."""
# TODO(yamahata): injected_files isn't supported.
# Anyway OSAPI doesn't support stop/start yet
self._run_instance(context, instance_id)
def _shutdown_instance(self, context, instance_id, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
LOG.audit(_("%(action_str)s instance %(instance_id)s") %
{'action_str': action_str, 'instance_id': instance_id},
context=context)
network_info = self._get_instance_nw_info(context, instance)
if not FLAGS.stub_network:
self.network_api.deallocate_for_instance(context, instance)
volumes = instance.get('volumes') or []
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance, network_info)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
self._instance_update(context,
instance_id,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=utils.utcnow())
self.db.instance_destroy(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.delete',
notifier.INFO, usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
self._instance_update(context,
instance_id,
vm_state=vm_states.STOPPED,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance_id: Instance identifier (integer)
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.REBUILDING,
task_state=None)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, network_info)
self._instance_update(context,
instance_id,
vm_state=vm_states.REBUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
instance_ref.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
self._instance_update(context,
instance_id,
vm_state=vm_states.REBUILDING,
task_state=task_states.SPAWNING)
# pull in new password here since the original password isn't in the db
instance_ref.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
self.driver.spawn(context, instance_ref, network_info, bd_mapping)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=utils.utcnow())
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier.INFO,
usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING)
if instance_ref['power_state'] != power_state.RUNNING:
state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.reboot(instance_ref, network_info)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id,
image_type='snapshot', backup_type=None,
rotation=None):
"""Snapshot an instance on this host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
if image_type == "snapshot":
task_state = task_states.IMAGE_SNAPSHOT
elif image_type == "backup":
task_state = task_states.IMAGE_BACKUP
else:
raise Exception(_('Image type not recognized %s') % image_type)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=task_state)
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['power_state'] != power_state.RUNNING:
state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
self.driver.snapshot(context, instance_ref, image_id)
self._instance_update(context, instance_id, task_state=None)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
elif image_type == 'backup' and rotation:
instance_uuid = instance_ref['uuid']
self.rotate_backups(context, instance_uuid, backup_type, rotation)
elif image_type == 'backup':
raise exception.RotationRequiredForBackup()
def rotate_backups(self, context, instance_uuid, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance_uuid: string representing uuid of instance
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
# NOTE(jk0): Eventually extract this out to the ImageService?
def fetch_images():
images = []
marker = None
while True:
batch = image_service.detail(context, filters=filters,
marker=marker, sort_key='created_at', sort_dir='desc')
if not batch:
break
images += batch
marker = batch[-1]['id']
return images
image_service = nova.image.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance_uuid}
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
% locals()))
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups" % excess))
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %d" % image_id))
image_service.delete(context, image_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
max_tries = 10
for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
instance_state = instance_ref["power_state"]
expected_state = power_state.RUNNING
if instance_state != expected_state:
raise exception.Error(_('Instance is not running'))
else:
try:
self.driver.set_admin_password(instance_ref, new_pass)
LOG.audit(_("Instance %s: Root password set"),
instance_ref["name"])
break
except NotImplementedError:
# NOTE(dprince): if the driver doesn't implement
# set_admin_password we break to avoid a loop
LOG.warn(_('set_admin_password is not implemented '
'by this driver.'))
break
except Exception, e:
# Catch all here because this could be anything.
LOG.exception(e)
if i == max_tries - 1:
# At some point this exception may make it back
# to the API caller, and we don't want to reveal
# too much. The real exception is logged above
raise exception.Error(_('Internal error'))
time.sleep(1)
continue
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def inject_file(self, context, instance_id, path, file_contents):
"""Write a file to the specified path in an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
'instance: %(instance_id)s (state: %(instance_state)s '
'expected: %(expected_state)s)') % locals())
nm = instance_ref['name']
msg = _('instance %(nm)s: injecting file to %(path)s') % locals()
LOG.audit(msg)
self.driver.inject_file(instance_ref, path, file_contents)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def agent_update(self, context, instance_id, url, md5hash):
"""Update agent running on an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to update agent on a non-running '
'instance: %(instance_id)s (state: %(instance_state)s '
'expected: %(expected_state)s)') % locals())
nm = instance_ref['name']
msg = _('instance %(nm)s: updating agent to %(url)s') % locals()
LOG.audit(msg)
self.driver.agent_update(instance_ref, url, md5hash)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance_ref)
# NOTE(blamar): None of the virt drivers use the 'callback' param
self.driver.rescue(context, instance_ref, None, network_info)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
vm_state=vm_states.RESCUED,
task_state=None,
power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance_ref)
# NOTE(blamar): None of the virt drivers use the 'callback' param
self.driver.unrescue(instance_ref, None, network_info)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
vm_state=vm_states.ACTIVE,
task_state=None,
power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def confirm_resize(self, context, instance_id, migration_id):
"""Destroys the source instance."""
migration_ref = self.db.migration_get(context, migration_id)
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, network_info)
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
notifier.INFO,
usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def revert_resize(self, context, instance_id, migration_id):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, network_info)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'finish_revert_resize',
'args': {'instance_id': instance_ref['uuid'],
'migration_id': migration_ref['id']},
})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def finish_revert_resize(self, context, instance_id, migration_id):
"""Finishes the second half of reverting a resize.
Power back on the source instance and revert the resized attributes
in the database.
"""
migration_ref = self.db.migration_get(context, migration_id)
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
instance_type = self.db.instance_type_get(context,
migration_ref['old_instance_type_id'])
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
self._instance_update(context,
instance_ref["uuid"],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb'],
instance_type_id=instance_type['id'])
self.driver.revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.revert',
notifier.INFO,
usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def prep_resize(self, context, instance_id, instance_type_id):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
context = context.elevated()
# Because of checks_instance_lock, this parameter must currently be
# named instance_id, even though the compute API always passes the
# instance UUID down
instance_ref = self.db.instance_get_by_uuid(context, instance_id)
if instance_ref['host'] == FLAGS.host:
self._instance_update(context,
instance_id,
vm_state=vm_states.ERROR)
msg = _('Migration error: destination same as source!')
raise exception.Error(msg)
old_instance_type = self.db.instance_type_get(context,
instance_ref['instance_type_id'])
new_instance_type = self.db.instance_type_get(context,
instance_type_id)
migration_ref = self.db.migration_create(context,
{'instance_uuid': instance_ref['uuid'],
'source_compute': instance_ref['host'],
'dest_compute': FLAGS.host,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': instance_type_id,
'status': 'pre-migrating'})
LOG.audit(_('instance %s: migrating'), instance_ref['uuid'],
context=context)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
instance_ref['host'])
rpc.cast(context, topic,
{'method': 'resize_instance',
'args': {'instance_id': instance_ref['uuid'],
'migration_id': migration_ref['id']}})
usage_info = utils.usage_from_instance(instance_ref,
new_instance_type=new_instance_type['name'],
new_instance_type_id=new_instance_type['id'])
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier.INFO,
usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resize_instance(self, context, instance_id, migration_id):
"""Starts the migration of a running instance to another host."""
migration_ref = self.db.migration_get(context, migration_id)
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
self.db.migration_update(context,
migration_id,
{'status': 'migrating'})
disk_info = self.driver.migrate_disk_and_power_off(
instance_ref, migration_ref['dest_host'])
self.db.migration_update(context,
migration_id,
{'status': 'post-migrating'})
service = self.db.service_get_by_host_and_topic(
context, migration_ref['dest_compute'], FLAGS.compute_topic)
topic = self.db.queue_get_for(context,
FLAGS.compute_topic,
migration_ref['dest_compute'])
params = {'migration_id': migration_id,
'disk_info': disk_info,
'instance_id': instance_ref['uuid']}
rpc.cast(context, topic, {'method': 'finish_resize',
'args': params})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def finish_resize(self, context, instance_id, migration_id, disk_info):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
resize_instance = False
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
if migration_ref['old_instance_type_id'] != \
migration_ref['new_instance_type_id']:
instance_type = self.db.instance_type_get(context,
migration_ref['new_instance_type_id'])
self.db.instance_update(context, instance_ref.uuid,
dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))
resize_instance = True
instance_ref = self.db.instance_get_by_uuid(context,
instance_ref.uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.finish_migration(context, instance_ref, disk_info,
network_info, resize_instance)
self._instance_update(context,
instance_id,
vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_VERIFY)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def add_fixed_ip_to_instance(self, context, instance_id, network_id):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self.network_api.add_fixed_ip_to_instance(context, instance_id,
self.host, network_id)
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self.network_api.remove_fixed_ip_from_instance(context, instance_id,
address)
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
LOG.audit(_('instance %s: pausing'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.pause(instance_ref, lambda result: None)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.PAUSED,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.unpause(instance_ref, lambda result: None)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(host, action)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def set_host_enabled(self, context, host=None, enabled=None):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(host, enabled)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["power_state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
LOG.audit(_('instance %s: suspending'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.suspend(instance_ref, lambda result: None)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.SUSPENDED,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
LOG.audit(_('instance %s: resuming'), instance_id, context=context)
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.resume(instance_ref, lambda result: None)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
instance_id,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
"""Lock the given instance."""
context = context.elevated()
LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def unlock_instance(self, context, instance_id):
"""Unlock the given instance."""
context = context.elevated()
LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_lock(self, context, instance_id):
"""Return the boolean state of the given instance's lock."""
context = context.elevated()
LOG.debug(_('instance %s: getting locked state'), instance_id,
context=context)
if utils.is_uuid_like(instance_id):
uuid = instance_id
instance_ref = self.db.instance_get_by_uuid(context, uuid)
else:
instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
@checks_instance_lock
def reset_network(self, context, instance_id):
"""Reset networking on the given instance."""
instance = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: reset network'), instance_id,
context=context)
self.driver.reset_network(instance)
@checks_instance_lock
def inject_network_info(self, context, instance_id):
"""Inject network info for the given instance."""
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
instance = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
self.driver.inject_network_info(instance, network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_console_output(self, context, instance_id):
"""Send the console output for the given instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
output = self.driver.get_console_output(instance_ref)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
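# Illustrative sketch (not part of the original manager): the return above
# squashes anything that is not valid UTF-8/ASCII in the console log to '?'
# so the text can be shipped safely over the message bus. The same helper in
# isolation (Python 3 spelling):
def to_safe_ascii(raw_bytes):
    """Decode bytes leniently, then strip the result down to plain ASCII."""
    return raw_bytes.decode('utf-8', 'replace').encode('ascii', 'replace')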
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console."""
context = context.elevated()
LOG.debug(_("instance %s: getting ajax console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_vnc_console(self, context, instance_id):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("instance %s: getting vnc console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_vnc_console(instance_ref)
def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance at boot time. So actual attach
is done by instance creation"""
# TODO(yamahata):
# should move check_attach to volume manager?
volume.API().check_attach(context, volume_id)
context = context.elevated()
LOG.audit(_("instance %(instance_id)s: booting with "
"volume %(volume_id)s at %(mountpoint)s") %
locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context, volume_id)
self.db.volume_attached(context, volume_id, instance_id, mountpoint)
return dev_path
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s"
" to %(mountpoint)s") % locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
self.driver.attach_volume(instance_ref['name'],
dev_path,
mountpoint)
self.db.volume_attached(context,
volume_id,
instance_id,
mountpoint)
values = {
'instance_id': instance_id,
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_create(context, values)
except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# exception below.
LOG.exception(_("instance %(instance_id)s: attach failed"
" %(mountpoint)s, removing") % locals(), context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
return True
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
mp = volume_ref['mountpoint']
LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
" on instance %(instance_id)s") % locals(), context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
if destroy_bdm:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance_id, volume_id)
return True
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
return self._detach_volume(context, instance_id, volume_id, True)
def remove_volume(self, context, volume_id):
"""Remove volume on compute host.
:param context: security context
:param volume_id: volume ID
"""
self.volume_manager.remove_compute_volume(context, volume_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def compare_cpu(self, context, cpu_info):
"""Checks that the host cpu is compatible with a cpu given by xml.
:param context: security context
:param cpu_info: json string obtained from virConnect.getCapabilities
:returns: See driver.compare_cpu
"""
return self.driver.compare_cpu(cpu_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def create_shared_storage_test_file(self, context):
"""Makes tmpfile under FLAGS.instance_path.
This method enables compute nodes to recognize that they mount the
same shared storage. (create|check|cleanup)_shared_storage_test_file()
are used together as a set.
:param context: security context
:returns: tmpfile name(basename)
"""
dirpath = FLAGS.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
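# Illustrative sketch (not part of the original manager): the
# create/check/cleanup trio detects shared storage by writing a temp file on
# one path and looking for it under another. Reduced to a single local check,
# assuming path_a and path_b are directories that may or may not be the same
# mount:
def looks_like_shared_storage(path_a, path_b):
    fd, tmp = tempfile.mkstemp(dir=path_a)
    os.close(fd)
    try:
        return os.path.exists(os.path.join(path_b, os.path.basename(tmp)))
    finally:
        os.remove(tmp)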
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_shared_storage_test_file(self, context, filename):
"""Confirms existence of the tmpfile under FLAGS.instances_path.
Returns False if the tmpfile cannot be confirmed.
:param context: security context
:param filename: confirm existence of FLAGS.instances_path/thisfile
"""
tmp_file = os.path.join(FLAGS.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def cleanup_shared_storage_test_file(self, context, filename):
"""Removes existence of the tmpfile under FLAGS.instances_path.
:param context: security context
:param filename: remove existence of FLAGS.instances_path/thisfile
"""
tmp_file = os.path.join(FLAGS.instances_path, filename)
os.remove(tmp_file)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_available_resource(self, context):
"""See comments update_resource_info.
:param context: security context
:returns: See driver.update_available_resource()
"""
return self.driver.update_available_resource(context, self.host)
def pre_live_migration(self, context, instance_id, time=None,
block_migration=False, disk=None):
"""Preparations for live migration at dest host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param block_migration: if true, prepare for block migration
"""
if not time:
time = greenthread
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
hostname = instance_ref['hostname']
# Getting fixed ips
fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
LOG.info(_("%s has no volume."), hostname)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
# Bridge settings.
# Call this method before ensure_filtering_rules_for_instance,
# since ensure_filtering_rules_for_instance fails if the bridge
# is not set up yet.
#
# Retrying is necessary because requests arrive continuously and
# concurrent requests to iptables make it complain.
network_info = self._get_instance_nw_info(context, instance_ref)
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
self.driver.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_("plug_vifs() failed %(cnt)d."
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
# Creating filters for hypervisors and firewalls.
# An example is nova-instance-instance-xxx, which is written to
# libvirt.xml (check "virsh nwfilter-list").
# This nwfilter is necessary on the destination host.
# In addition, this method creates the filtering rules on the
# destination host.
self.driver.ensure_filtering_rules_for_instance(instance_ref,
network_info)
# Preparation for block migration
if block_migration:
self.driver.pre_block_migration(context,
instance_ref,
disk)
def live_migration(self, context, instance_id,
dest, block_migration=False):
"""Executing live migration.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
:param block_migration: if true, do block migration
"""
# Get instance for error handling.
instance_ref = self.db.instance_get(context, instance_id)
try:
# Check that the volume node is working correctly when any volumes
# are attached to the instance.
if instance_ref['volumes']:
rpc.call(context,
FLAGS.volume_topic,
{"method": "check_for_export",
"args": {'instance_id': instance_id}})
if block_migration:
disk = self.driver.get_instance_disk_info(context,
instance_ref)
else:
disk = None
rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
"args": {'instance_id': instance_id,
'block_migration': block_migration,
'disk': disk}})
except Exception:
i_name = instance_ref.name
msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
LOG.error(msg % locals())
self.rollback_live_migration(context, instance_ref,
dest, block_migration)
raise
# Executing live migration
# live_migration might raise exceptions, but
# no recovery is attempted in this version.
self.driver.live_migration(context, instance_ref, dest,
self.post_live_migration,
self.rollback_live_migration,
block_migration)
def post_live_migration(self, ctxt, instance_ref,
dest, block_migration=False):
"""Post operations for live migration.
This method is called from live_migration
and mainly updates database records.
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
:param block_migration: if true, do block migration
"""
LOG.info(_('post_live_migration() started.'))
instance_id = instance_ref['id']
# Detaching volumes.
try:
for vol in self.db.volume_get_all_by_instance(ctxt, instance_id):
self.volume_manager.remove_compute_volume(ctxt, vol['id'])
except exception.NotFound:
pass
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref, network_info)
# Database updating.
i_name = instance_ref.name
try:
# Do not return if the floating_ip is not found; otherwise the
# instance would never be accessible.
floating_ip = self.db.instance_get_floating_address(ctxt,
instance_id)
if not floating_ip:
LOG.info(_('No floating_ip is found for %s.'), i_name)
else:
floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
floating_ip)
self.db.floating_ip_update(ctxt,
floating_ip_ref['address'],
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip is found for %s.'), i_name)
except Exception, e:
LOG.error(_("Live migration: Unexpected error: "
"%(i_name)s cannot inherit floating "
"ip.\n%(e)s") % (locals()))
# Define the domain at the destination host; without doing it,
# pause/suspend/terminate do not work.
rpc.call(ctxt,
self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance_id': instance_ref.id,
'block_migration': block_migration}})
# Restore instance state
current_power_state = self._get_power_state(ctxt, instance_ref)
self._instance_update(ctxt,
instance_ref["id"],
host=dest,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
# Restore volume state
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
# No instance is booting at the source host any more, but the instance
# dir must be deleted to prepare for the next block migration.
if block_migration:
self.driver.destroy(instance_ref, network_info)
LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
% locals())
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."))
def post_live_migration_at_destination(self, context,
instance_id, block_migration=False):
"""Post operations for live migration .
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param block_migration: block_migration
"""
instance_ref = self.db.instance_get(context, instance_id)
LOG.info(_('Post operation of migration started for %s.')
% instance_ref.name)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.post_live_migration_at_destination(context,
instance_ref,
network_info,
block_migration)
def rollback_live_migration(self, context, instance_ref,
dest, block_migration):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host (this method is called from the live
migration source host)
"""
host = instance_ref['host']
self._instance_update(context,
instance_ref['id'],
host=host,
vm_state=vm_states.ACTIVE,
task_state=None)
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
self.db.volume_update(context, volume_id, {'status': 'in-use'})
volume.API().remove_from_compute(context, volume_id, dest)
# Block migration needs an empty image at the destination host
# before the migration starts, so if any failure occurs,
# those empty images have to be deleted.
if block_migration:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": "rollback_live_migration_at_destination",
"args": {'instance_id': instance_ref['id']}})
def rollback_live_migration_at_destination(self, context, instance_id):
""" Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
"""
instances_ref = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instances_ref)
self.driver.destroy(instances_ref, network_info)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
error_list = super(ComputeManager, self).periodic_tasks(context)
if error_list is None:
error_list = []
try:
if FLAGS.rescue_timeout > 0:
self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
except Exception as ex:
LOG.warning(_("Error during poll_rescued_instances: %s"),
unicode(ex))
error_list.append(ex)
try:
self._report_driver_status()
except Exception as ex:
LOG.warning(_("Error during report_driver_status(): %s"),
unicode(ex))
error_list.append(ex)
try:
self._sync_power_states(context)
except Exception as ex:
LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
return error_list
def _report_driver_status(self):
curr_time = time.time()
if curr_time - self._last_host_check > FLAGS.host_state_interval:
self._last_host_check = curr_time
LOG.info(_("Updating host status"))
# This will grab info about the host and queue it
# to be sent to the Schedulers.
self.update_service_capabilities(
self.driver.get_host_stats(refresh=True))
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
The hypervisor is authoritative for the power_state data, so we
simply loop over all known instances for this host and update the
power_state according to the hypervisor. If the instance is not found
then it will be set to power_state.NOSTATE, because it doesn't exist
on the hypervisor.
"""
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
db_instances = self.db.instance_get_all_by_host(context, self.host)
num_vm_instances = len(vm_instances)
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.info(_("Found %(num_db_instances)s in the database and "
"%(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
name = db_instance["name"]
db_power_state = db_instance['power_state']
vm_instance = vm_instances.get(name)
if vm_instance is None:
vm_power_state = power_state.NOSTATE
else:
vm_power_state = vm_instance.state
if vm_power_state == db_power_state:
continue
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state)
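# Illustrative sketch (not part of the original manager): _sync_power_states
# treats the hypervisor as authoritative. Reduced to plain dictionaries,
# assuming db_states and hypervisor_states map instance name -> power state
# and NOSTATE stands in for power_state.NOSTATE:
def reconcile_power_states_sketch(db_states, hypervisor_states, NOSTATE=0):
    updates = {}
    for name, db_state in db_states.items():
        actual = hypervisor_states.get(name, NOSTATE)
        if actual != db_state:
            updates[name] = actual
    return updates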
| 44.765771
| 79
| 0.59916
|
549113866e7dfe949945e273539297e622ed8626
| 7,115
|
py
|
Python
|
chromedriver_autoinstaller/utils.py
|
jaymegordo/python-chromedriver-autoinstaller
|
5578d224f1bdfb286437605ec093c9513b0c074b
|
[
"MIT"
] | null | null | null |
chromedriver_autoinstaller/utils.py
|
jaymegordo/python-chromedriver-autoinstaller
|
5578d224f1bdfb286437605ec093c9513b0c074b
|
[
"MIT"
] | null | null | null |
chromedriver_autoinstaller/utils.py
|
jaymegordo/python-chromedriver-autoinstaller
|
5578d224f1bdfb286437605ec093c9513b0c074b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Helper functions for filename and URL generation.
"""
import sys
import os
import subprocess
import urllib.request
import urllib.error
import zipfile
import xml.etree.ElementTree as elemTree
import logging
import re
from io import BytesIO
__author__ = 'Yeongbin Jo <iam.yeongbin.jo@gmail.com>'
def get_chromedriver_filename():
"""
Returns the filename of the binary for the current platform.
:return: Binary filename
"""
if sys.platform.startswith('win'):
return 'chromedriver.exe'
return 'chromedriver'
def get_variable_separator():
"""
Returns the environment variable separator for the current platform.
:return: Environment variable separator
"""
if sys.platform.startswith('win'):
return ';'
return ':'
def get_platform_architecture():
if sys.platform.startswith('linux') and sys.maxsize > 2 ** 32:
platform = 'linux'
architecture = '64'
elif sys.platform == 'darwin':
platform = 'mac'
architecture = '64'
elif sys.platform.startswith('win'):
platform = 'win'
architecture = '32'
else:
raise RuntimeError('Could not determine chromedriver download URL for this platform.')
return platform, architecture
def get_chromedriver_url(version):
"""
Generates the download URL for the current platform, architecture and the given version.
Supports Linux, MacOS and Windows.
:param version: chromedriver version string
:return: Download URL for chromedriver
"""
base_url = 'https://chromedriver.storage.googleapis.com/'
platform, architecture = get_platform_architecture()
return base_url + version + '/chromedriver_' + platform + architecture + '.zip'
def find_binary_in_path(filename):
"""
Searches for a binary named `filename` in the current PATH. If an executable is found, its absolute path is returned
else None.
:param filename: Filename of the binary
:return: Absolute path or None
"""
if 'PATH' not in os.environ:
return None
for directory in os.environ['PATH'].split(get_variable_separator()):
binary = os.path.abspath(os.path.join(directory, filename))
if os.path.isfile(binary) and os.access(binary, os.X_OK):
return binary
return None
def check_version(binary, required_version):
try:
version = subprocess.check_output([binary, '-v'])
version = re.match(r'.*?([\d.]+).*?', version.decode('utf-8'))[1]
if version == required_version:
return True
except Exception:
return False
return False
def get_chrome_version():
"""
:return: the version of Chrome installed on the client
"""
platform, _ = get_platform_architecture()
if platform == 'linux':
with subprocess.Popen(['chromium-browser', '--version'], stdout=subprocess.PIPE) as proc:
version = proc.stdout.read().decode('utf-8').replace('Chromium', '').strip()
version = version.replace('Google Chrome', '').strip()
elif platform == 'mac':
process = subprocess.Popen(['/Applications/Google Chrome.app/Contents/MacOS/Google Chrome', '--version'], stdout=subprocess.PIPE)
version = process.communicate()[0].decode('UTF-8').replace('Google Chrome', '').strip()
elif platform == 'win':
process = subprocess.Popen(
['reg', 'query', 'HKEY_CURRENT_USER\\Software\\Google\\Chrome\\BLBeacon', '/v', 'version'],
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL
)
version = process.communicate()[0].decode('UTF-8').strip().split()[-1]
else:
return
return version
def get_major_version(version):
"""
:param version: the version of chrome
:return: the major version of chrome
"""
return version.split('.')[0]
def get_matched_chromedriver_version(version):
"""
:param version: the version of chrome
:return: the version of chromedriver
"""
doc = urllib.request.urlopen('https://chromedriver.storage.googleapis.com').read()
root = elemTree.fromstring(doc)
for k in root.iter('{http://doc.s3.amazonaws.com/2006-03-01}Key'):
if k.text.find(get_major_version(version) + '.') == 0:
return k.text.split('/')[0]
return
def get_chromedriver_path():
"""
:return: path of the chromedriver binary
"""
return os.path.abspath(os.path.dirname(__file__))
def print_chromedriver_path():
"""
Print the path of the chromedriver binary.
"""
print(get_chromedriver_path())
def download_chromedriver(cwd=False, p_install=None):
"""
Downloads, unzips and installs chromedriver.
If a matching chromedriver binary is already installed it is reused, otherwise the archive is downloaded.
:param p_install: Custom install location
:param cwd: Flag indicating whether to download to current working directory
:return: The file path of chromedriver
"""
chrome_version = get_chrome_version()
if not chrome_version:
logging.debug('Chrome is not installed.')
return
chromedriver_version = get_matched_chromedriver_version(chrome_version)
if not chromedriver_version:
logging.debug('Can not find chromedriver for currently installed chrome version.')
return
major_version = get_major_version(chromedriver_version)
# allow custom path for install
if p_install is not None:
chromedriver_dir = os.path.join(
os.path.abspath(str(p_install)),
major_version
)
elif cwd:
chromedriver_dir = os.path.join(
os.path.abspath(os.getcwd()),
major_version
)
else:
chromedriver_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
major_version
)
chromedriver_filename = get_chromedriver_filename()
chromedriver_filepath = os.path.join(chromedriver_dir, chromedriver_filename)
if not os.path.isfile(chromedriver_filepath) or \
not check_version(chromedriver_filepath, chromedriver_version):
logging.debug(f'Downloading chromedriver ({chromedriver_version})...')
if not os.path.isdir(chromedriver_dir):
os.makedirs(chromedriver_dir)
url = get_chromedriver_url(version=chromedriver_version)
try:
response = urllib.request.urlopen(url)
if response.getcode() != 200:
raise urllib.error.URLError('Not Found')
except urllib.error.URLError:
raise RuntimeError(f'Failed to download chromedriver archive: {url}')
archive = BytesIO(response.read())
with zipfile.ZipFile(archive) as zip_file:
zip_file.extract(chromedriver_filename, chromedriver_dir)
else:
logging.debug('Chromedriver is already installed.')
if not os.access(chromedriver_filepath, os.X_OK):
os.chmod(chromedriver_filepath, 0o744)
return chromedriver_filepath
if __name__ == '__main__':
print(get_chrome_version())
print(download_chromedriver())
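# Illustrative usage sketch (not part of the original module). Assuming this
# file is importable as chromedriver_autoinstaller.utils and Chrome is
# installed, the returned path points at a ready-to-use chromedriver binary:
#
#     from chromedriver_autoinstaller import utils
#     driver_path = utils.download_chromedriver()
#     if driver_path:
#         print('chromedriver available at', driver_path)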
| 32.939815
| 137
| 0.666901
|
ec5edfea34428f0cda1e6e5c433e392bebf44589
| 1,888
|
py
|
Python
|
setup.py
|
nicedexter/landslide
|
0af037f37c34e351a271f80e4479193aaa48ea56
|
[
"Apache-2.0"
] | 1,025
|
2015-01-05T09:19:40.000Z
|
2022-03-30T09:11:18.000Z
|
setup.py
|
nicedexter/landslide
|
0af037f37c34e351a271f80e4479193aaa48ea56
|
[
"Apache-2.0"
] | 82
|
2015-01-21T19:49:36.000Z
|
2021-02-02T01:08:54.000Z
|
setup.py
|
nicedexter/landslide
|
0af037f37c34e351a271f80e4479193aaa48ea56
|
[
"Apache-2.0"
] | 241
|
2015-01-23T09:22:18.000Z
|
2021-11-24T11:13:23.000Z
|
from setuptools import setup
import landslide
setup(
name=landslide.__title__,
version=landslide.__version__,
description='HTML5 slideshow generator for Markdown, ReST, and Textile',
packages=['landslide'],
include_package_data=True,
zip_safe=False,
author=landslide.__author__,
author_email=landslide.__author_email__,
url='http://github.com/adamzap/landslide',
license=landslide.__license__,
platforms=['any'],
keywords=[
'markdown',
'slideshow',
'presentation',
'rst',
'restructuredtext',
'textile'
],
install_requires=[
'Jinja2==2.10.1',
'Markdown==2.6.11',
'Pygments==2.2.0',
'docutils==0.14',
'six==1.11.0'
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Text Processing :: Markup'
],
long_description='''\
Landslide takes your Markdown, ReST, or Textile file(s) and generates a fancy
HTML5 slideshow like `this <http://landslide.adamzap.com/>`_.
Read the `README <http://github.com/adamzap/landslide/blob/master/README.md>`_
for formatting instructions and more information.
''',
entry_points={
'console_scripts': [
'landslide = landslide.main:main',
]
},
)
| 30.95082
| 78
| 0.608581
|
6432e2520a5f559d925ea79b26815bcbf1573230
| 3,542
|
py
|
Python
|
src/bcsim/__main__.py
|
geozeke/bcsim
|
cda4c7b458f434033b001c06bc53bc544d703256
|
[
"MIT"
] | null | null | null |
src/bcsim/__main__.py
|
geozeke/bcsim
|
cda4c7b458f434033b001c06bc53bc544d703256
|
[
"MIT"
] | 6
|
2021-12-23T15:49:33.000Z
|
2022-01-21T15:47:28.000Z
|
src/bcsim/__main__.py
|
geozeke/bcsim
|
cda4c7b458f434033b001c06bc53bc544d703256
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Main entry point for bcsim."""
import argparse
import curses
import sys
from datetime import datetime as dt
from pathlib import Path
from bcsim import clear
from bcsim import runSimulation
# --------------------------------------------------------------------
def numballs_type(n):
"""Input validation for argument parser.
Parameters
----------
n : int
The number of balls to be modeled in a clock
Returns
-------
int
Ensures the command line input is returned as a valid int
Raises
------
argparse.ArgumentTypeError
Raised if the command line input cannot be cast to a valid int or
exceeds the range of allowable balls in a clock (27 <= n <= 1000).
"""
msg = "min must be >= 27; max must be <= 1000"
try:
v = int(n)
if v < 27 or v > 1000:
raise argparse.ArgumentTypeError(msg)
except ValueError:
raise argparse.ArgumentTypeError("min and max must be integers")
return v
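# Illustrative sketch (not part of the original file): a custom `type=`
# callable such as numballs_type lets argparse validate and convert the value
# in one step. Minimal standalone demonstration using only the function above:
def _demo_numballs_type():
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument('balls', type=numballs_type)
    parsed = demo_parser.parse_args(['27'])    # accepted, parsed.balls == 27
    assert parsed.balls == 27
    # demo_parser.parse_args(['10'])           # would exit with a usage error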
# --------------------------------------------------------------------
def main():
"""Kick things off.
The main function:
1. Sets up an argument parser to capture and validate command line
input.
2. Sets up the curses environment for screen control.
3. Starts a timer.
4. Runs the simulation.
5. Stops the timer and shows statistics, including elapsed time.
"""
msg = """Rolling ball clock simulator."""
epi = "Version 1.0.5"
parser = argparse.ArgumentParser(description=msg, epilog=epi)
msg = """minimum number of balls in the clock - the smallest
permissible minimum value is 27."""
parser.add_argument('min',
help=msg,
type=numballs_type)
msg = """maximum number of balls in the clock - the largest
permissible maximum value is 1000."""
parser.add_argument('max',
help=msg,
type=numballs_type)
msg = """name of output file to hold simulation results. Results are
saved in csv format (balls, number of simulated days, time to
complete the simulated run)."""
parser.add_argument('outfile',
type=argparse.FileType('w'),
help=msg)
msg = """run the simulation in \'fast\' mode. In this mode, each
incremental movement of the clock is 12-hrs. The default behavior is
for each incremental movement of the clock to be 1-min."""
parser.add_argument('-f', '--fast',
help=msg,
action='store_true')
args = parser.parse_args()
if args.max < args.min:
parser.print_usage()
print('error: max must be >= min')
sys.exit(1)
# Start the clock
start = dt.now()
# Launch simulation; stop the clock; close open file
curses.wrapper(runSimulation, args)
stop = dt.now()
args.outfile.close()
# Show post-simulation results
clear()
clocks = args.max - args.min + 1
print('Simulation complete\n')
print(f' Total elapsed time: {str(stop-start)}')
print(f'Number of clocks simulated: {clocks}')
print(f' Minimum number of balls: {args.min}')
print(f' Maximum number of balls: {args.max}')
print(f' Results saved to: {Path(args.outfile.name)}\n')
return
# --------------------------------------------------------------------
if __name__ == '__main__':
main()
| 28.111111
| 72
| 0.572276
|
458e0417d97738bb52dea8d4d1edf065cc0f28dc
| 4,057
|
py
|
Python
|
TextBoxSeg/tools/demo_ic15.py
|
weijiawu/Unconstrained-Text-Detection-with-Box-Supervisionand-Dynamic-Self-Training
|
b4954e20a3ce0c5b97e8d992c4282b97c7a8c1f0
|
[
"Apache-1.1"
] | 31
|
2020-11-26T02:10:11.000Z
|
2022-03-14T06:21:46.000Z
|
TextBoxSeg/tools/demo_ic15.py
|
weijiawu/Unconstrained-Text-Detection-with-Box-Supervisionand-Dynamic-Self-Training
|
b4954e20a3ce0c5b97e8d992c4282b97c7a8c1f0
|
[
"Apache-1.1"
] | 4
|
2020-11-25T18:34:46.000Z
|
2021-04-29T03:10:38.000Z
|
TextBoxSeg/tools/demo_ic15.py
|
weijiawu/Unconstrained-Text-Detection-with-Box-Supervisionand-Dynamic-Self-Training
|
b4954e20a3ce0c5b97e8d992c4282b97c7a8c1f0
|
[
"Apache-1.1"
] | 3
|
2020-11-30T16:25:16.000Z
|
2022-01-14T12:39:37.000Z
|
import os
import sys
import torch
cur_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.split(cur_path)[0]
sys.path.append(root_path)
from torchvision import transforms
from PIL import Image
from segmentron.utils.visualize import get_color_pallete
from segmentron.models.model_zoo import get_segmentation_model
from segmentron.utils.options import parse_args
from segmentron.utils.default_setup import default_setup
from segmentron.config import cfg
from IPython import embed
import numpy as np
from tqdm import trange
import cv2
def demo():
args = parse_args()
cfg.update_from_file(args.config_file)
cfg.PHASE = 'test'
cfg.ROOT_PATH = root_path
cfg.check_and_freeze()
default_setup(args)
# output folder
output_dir = 'demo/trash/IC15'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# image transform
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
])
model = get_segmentation_model().to(args.device)
model.eval()
# get image patches from IC15
if os.path.exists('/mnt/lustre/share_data/xieenze/xez_space/Text/ICDAR2015/'):
ic15_root_path = '/mnt/lustre/share_data/xieenze/xez_space/Text/ICDAR2015/'
else:
ic15_root_path = '/mnt/lustre/share/xieenze/Text/ICDAR2015/'
ic15_train_data = ic15_root_path + 'ch4_training_images'
ic15_train_gt = ic15_root_path + 'ch4_training_localization_transcription_gt'
assert os.path.exists(ic15_train_data) and os.path.exists(ic15_train_gt)
patch_imgs = []
for i in trange(1, 501):
img_path = 'img_{}.jpg'.format(i)
img_path = os.path.join(ic15_train_data, img_path)
gt_path = 'gt_img_{}.txt'.format(i)
gt_path = os.path.join(ic15_train_gt, gt_path)
if os.path.exists(gt_path) and os.path.exists(img_path):
img, boxes = parse_img_gt(img_path, gt_path)
img = np.array(img)
if boxes == []:
continue
for box in boxes:
x1, y1, x2, y2 = box
patch = img[y1:y2 + 1, x1:x2 + 1]
patch_imgs.append(Image.fromarray(patch))
# Only test 500 images for now
if len(patch_imgs) > 500:
break
else:
print(img_path)
print('total patch images:{}'.format(len(patch_imgs)))
pool_imgs, pool_masks = [], []
count = 0
for image in patch_imgs:
# image = Image.open(img_path).convert('RGB')
resized_img = image.resize(cfg.TRAIN.BASE_SIZE)
resized_img = transform(resized_img).unsqueeze(0).to(args.device)
with torch.no_grad():
output = model(resized_img)
pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
img = np.array(image.resize(cfg.TRAIN.BASE_SIZE))
mask = np.array(get_color_pallete(pred, cfg.DATASET.NAME))[:,:,None].repeat(3,-1) * 255
if len(pool_imgs)<20:
pool_imgs.append(img)
pool_masks.append(mask)
else:
big_img = np.concatenate(pool_imgs, axis=0)
big_mask = np.concatenate(pool_masks, axis=0)
big_img_mask = Image.fromarray(np.concatenate([big_img, big_mask], axis=1))
big_img_mask.save('{}/{}.png'.format(output_dir, count))
print('{}/{}.png'.format(output_dir, count))
count += 1
pool_imgs, pool_masks = [], []
def parse_img_gt(img_path, gt_path):
img = Image.open(img_path)
with open(gt_path,'r') as f:
data=f.readlines()
boxes = []
for d in data:
d = d.replace('\n','').split(',')
polygon = d[:8]; text = d[8]
if "#" in text:
continue  # skip regions marked as ignore
polygon = [int(i.replace('\ufeff','')) for i in polygon]
polygon_np = np.array(polygon).reshape([-1, 2])
x, y, w, h = cv2.boundingRect(polygon_np)
boxes.append([x,y,x+w,y+h])
return img,boxes
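# Illustrative sketch (not part of the original script): parse_img_gt reduces
# each 4-point ICDAR polygon to its axis-aligned bounding box with
# cv2.boundingRect. The same conversion in isolation:
def polygon_to_bbox(polygon_flat):
    """[x1, y1, ..., x4, y4] -> [xmin, ymin, xmax, ymax]."""
    pts = np.array(polygon_flat, dtype=np.int32).reshape(-1, 2)
    x, y, w, h = cv2.boundingRect(pts)
    return [x, y, x + w, y + h]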
if __name__ == '__main__':
demo()
| 34.092437
| 95
| 0.629529
|
2b41fbf258e421555e1cf3e3608ed7f38c349d07
| 116
|
py
|
Python
|
src/onevision/models/classification/convnext/__init__.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | 2
|
2022-03-28T09:46:38.000Z
|
2022-03-28T14:12:32.000Z
|
src/onevision/models/classification/convnext/__init__.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
src/onevision/models/classification/convnext/__init__.py
|
phlong3105/onevision
|
90552b64df7213e7fbe23c80ffd8a89583289433
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
from .convnext import *
| 11.6
| 34
| 0.637931
|
85ee3138f0ba6225b746d4c6c86cb184ca645641
| 238
|
py
|
Python
|
examples/04relative-nested/app/config.py
|
podhmo/magicalimport
|
b9c516d9d134ca578a85f934caa8c9f9ce6b4fa9
|
[
"MIT"
] | null | null | null |
examples/04relative-nested/app/config.py
|
podhmo/magicalimport
|
b9c516d9d134ca578a85f934caa8c9f9ce6b4fa9
|
[
"MIT"
] | 9
|
2016-10-01T15:25:20.000Z
|
2021-02-18T05:25:43.000Z
|
examples/04relative-nested/app/config.py
|
podhmo/magicalimport
|
b9c516d9d134ca578a85f934caa8c9f9ce6b4fa9
|
[
"MIT"
] | 1
|
2017-07-19T12:38:56.000Z
|
2017-07-19T12:38:56.000Z
|
from . import shapes # this is OK
# "import shapes" is NG, because this module not in sys.path
# ModuleNotFoundError: No module named 'shapes'
# see also: ../../03relative/config.py
config = shapes.Config(host="localhost", port=44444)
| 29.75
| 60
| 0.722689
|
128cd18f26a885dbdb8dee324092bc3877e18ef7
| 8,175
|
py
|
Python
|
src/schedule/algorithms/bf.py
|
HiEST/gpu-topo-aware
|
8125c2875ad942b9cecd9d5178062ee0d5100d04
|
[
"Apache-2.0"
] | 7
|
2019-02-28T09:53:59.000Z
|
2022-01-06T06:18:02.000Z
|
src/schedule/algorithms/bf.py
|
HiEST/gpu-topo-aware
|
8125c2875ad942b9cecd9d5178062ee0d5100d04
|
[
"Apache-2.0"
] | null | null | null |
src/schedule/algorithms/bf.py
|
HiEST/gpu-topo-aware
|
8125c2875ad942b9cecd9d5178062ee0d5100d04
|
[
"Apache-2.0"
] | 4
|
2018-05-06T14:42:10.000Z
|
2021-11-30T03:28:49.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The second queue holds the jobs that could not be placed in the current scenario, e.g. because of the load. This
# queue has priority over the former queue.
#
# Copyright © 2017 Marcelo Amaral <marcelo.amaral@bsc.es>
import copy
import random
random.seed(1234)
def update_resources(job, cluster_resources, mid):
for jgpu in job.get_alloc_gpus():
for pgpu in cluster_resources.machines[mid].sockets[jgpu.socket].gpus:
if jgpu.id == pgpu.id:
pgpu.allocated = True
return cluster_resources
def get_jobs(placement, cluster_resources):
placement = sorted(placement.iteritems(), key=lambda k: k[1].arrival_time)
jobs = []
# TODO: by now we have only one machine, but I need to change it for a cluster
if cluster_resources.machines[0].get_total_free_gpus() > 0:
for _, job in placement:
if job.get_num_gpus() <= cluster_resources.machines[0].get_total_free_gpus():
jobs.append(job)
# else:
# break
return jobs
def get_socket_id(machine):
"""Get the socket with less GPUs"""
socket_id = None
gpus = 1000
for socket in machine.sockets:
sgpu = machine.get_free_gpu_per_socket(socket.id)
if sgpu > 0:
if socket_id is None:
socket_id = socket.id
if gpus >= sgpu:
socket_id = socket.id
gpus = sgpu
return socket_id
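# Illustrative sketch (not part of the original scheduler): get_socket_id is a
# best-fit choice: among sockets that still have free GPUs, pick the one with
# the fewest. The same idea over plain data, assuming free_gpus_per_socket maps
# socket id -> number of free GPUs:
def pick_socket_sketch(free_gpus_per_socket):
    candidates = {sid: n for sid, n in free_gpus_per_socket.items() if n > 0}
    if not candidates:
        return None
    return min(candidates, key=candidates.get)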
def bf(curr_time, queue1, queue2, cluster_resources, profile, utility):
temp_resources = copy.deepcopy(cluster_resources)
placement = dict()
for _ in range(len(queue2)):
job = queue2.popleft()
placement[job.id] = job
for _ in range(len(queue1)):
job = queue1.popleft()
placement[job.id] = job
jobs = get_jobs(placement, cluster_resources)
# We sort in reverse order so that pop() returns the first-arrived job
jobs = sorted(jobs, key=lambda k: k.arrival_time, reverse=True)
while len(jobs) > 0:
job = jobs.pop()
best_placement = None
for mid, machine in temp_resources.machines.items():
mem = copy.deepcopy(job.mem)
task = copy.deepcopy(job.tasks)
num_gpus = job.get_num_gpus()
if machine.get_total_free_mem() >= mem and \
machine.get_total_free_cores() >= task and \
machine.get_total_free_gpus() >= num_gpus:
# create a solution candidate
solution = dict()
solution['machine'] = mid
solution['arrival_job_time'] = job.arrival_time
solution['mem_per_socket'] = [0, 0]
solution['core_per_socket'] = [0, 0]
solution['gpu_per_socket'] = [[], []]
# "Randomly" select the socket
for _ in machine.sockets:
socket_id = get_socket_id(machine)
if socket_id is not None:
if machine.get_free_mem_per_socket(socket_id) >= mem:
smem = mem
else:
smem = machine.get_free_mem_per_socket(socket_id)
if machine.get_free_core_per_socket(socket_id) >= task:
stask = task
else:
stask = machine.get_free_core_per_socket(socket_id)
if machine.get_free_gpu_per_socket(socket_id) >= num_gpus:
sgpu = num_gpus
else:
sgpu = machine.get_free_gpu_per_socket(socket_id)
# First, reduce the machine resources by the requested amounts
machine.alloc_mem_in_socket(socket_id, smem)
solution['mem_per_socket'][socket_id] += smem
mem -= smem
machine.alloc_core_in_socket(socket_id, stask)
solution['core_per_socket'][socket_id] += stask
task -= stask
# Allocate the free gpus in order
allocated_gpus = 1
for gpu in machine.sockets[socket_id].gpus:
if not gpu.allocated:
if allocated_gpus <= sgpu:
pair = dict()
pair["hgpu"] = gpu
pair["mid"] = mid
pair["job"] = job
solution['gpu_per_socket'][socket_id].append(pair)
allocated_gpus += 1
gpu.allocated = True
else:
break
num_gpus -= sgpu
# if the job's resources are fully allocated, stop; otherwise allocate resources on other sockets
if mem == 0 and task == 0 and num_gpus == 0:
break
if mem == 0 and task == 0 and num_gpus == 0:
# if there were enough resources in this machine to place the job, save the placement decision
job.placement.placed = True
job.placement.start_time = curr_time
job.placement.end_time = None  # TODO: when the simulator is on, set the time according to the scenario
job.placement.machine = solution['machine']
job.placement.mem_per_socket = solution['mem_per_socket']
job.placement.core_per_socket = solution['core_per_socket']
job.placement.gpu_per_socket = solution['gpu_per_socket']
s = dict()
s['gpus'] = list()
for socket in solution['gpu_per_socket']:
for gpu in socket:
s['gpus'].append(gpu['hgpu'])
s['communication'] = utility.get_comm_cost(s['gpus'], s['gpus'], machine.gpu_distance,
machine.gpu_distance, None)
s['interference'] = utility.get_interference_in_job(job, machine, s['gpus'], profile)
s['fragmentation'] = utility.get_fragmentation(machine, s['gpus'])
job.placement.costs.comm = s['communication']
job.placement.costs.suffered_interf = s['interference']
job.placement.costs.frag = s['fragmentation']
job.placement.job_utility = utility.calculate_utility(s, system=False)
job.placement.machine = mid
# The system utility also considers the impact this job will have on the utility of the other running jobs.
# For each running job in the machine, it calculates the average utility between them.
# The goal is to maximize the minimal utility.
if len(machine.running_jobs) > 0:
s['interference_on_other_jobs'] = utility.get_interference_on_other_jobs(job, machine,
s['gpus'], profile)
job.placement.costs.suffered_interf = s['interference_on_other_jobs']
job.placement.system_utility = utility.calculate_utility(s)
if best_placement is None:
best_placement = job
elif best_placement.placement.job_utility < job.placement.job_utility:
best_placement = job
# break
if best_placement is not None:
placement[best_placement.id] = best_placement
# update the resource allocation
temp_resources = update_resources(best_placement, temp_resources, best_placement.placement.machine)
placement = sorted(placement.iteritems(), key=lambda k: k[1].arrival_time, reverse=True)
placement = [s[1] for s in placement]
return placement
| 45.165746
| 122
| 0.539572
|
2452d0e5b744fcd399f14d8b147a42e7bcd90220
| 1,931
|
py
|
Python
|
skmultilearn/problem_transform/__init__.py
|
emrecncelik/scikit-multilearn
|
1d7f7b74702cb9a5a8245726bf38e23e1f2f3382
|
[
"BSD-2-Clause"
] | 763
|
2015-03-22T18:54:33.000Z
|
2022-03-25T07:54:04.000Z
|
skmultilearn/problem_transform/__init__.py
|
emrecncelik/scikit-multilearn
|
1d7f7b74702cb9a5a8245726bf38e23e1f2f3382
|
[
"BSD-2-Clause"
] | 187
|
2015-01-27T15:06:35.000Z
|
2022-03-22T21:41:47.000Z
|
skmultilearn/problem_transform/__init__.py
|
emrecncelik/scikit-multilearn
|
1d7f7b74702cb9a5a8245726bf38e23e1f2f3382
|
[
"BSD-2-Clause"
] | 157
|
2015-04-13T16:47:36.000Z
|
2022-03-17T19:12:59.000Z
|
"""
The :mod:`skmultilearn.problem_transform` module provides classifiers
that follow the problem transformation approaches to multi-label classification.
The problem transformation approach to multi-label classification converts multi-label problems to
single-label problems: single-class or multi-class.
+----------------------------------------------------------+------------------------------------------------+
| Classifier | Description |
+==========================================================+================================================+
| :class:`~skmultilearn.problem_transform.BinaryRelevance` | treats each label as a separate single-class |
| | classification problem |
+----------------------------------------------------------+------------------------------------------------+
| :class:`~skmultilearn.problem_transform.ClassifierChain` | treats each label as a part of a conditioned |
| | chain of single-class classification problems |
+----------------------------------------------------------+------------------------------------------------+
| :class:`~skmultilearn.problem_transform.LabelPowerset` | treats each label combination as a separate |
| | class with one multi-class classification |
| | problem |
+----------------------------------------------------------+------------------------------------------------+
"""
from .br import BinaryRelevance
from .cc import ClassifierChain
from .lp import LabelPowerset
__all__ = ["BinaryRelevance",
"ClassifierChain",
"LabelPowerset"]
| 58.515152
| 109
| 0.397204
|
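A short usage sketch for the problem-transformation wrappers documented above. It assumes scikit-learn is installed and uses GaussianNB as the per-label base classifier; the toy data is made up. ClassifierChain and LabelPowerset are constructed the same way and differ only in how the labels are transformed.
import numpy as np
from sklearn.naive_bayes import GaussianNB
from skmultilearn.problem_transform import BinaryRelevance
X = np.array([[0.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.0, 0.0]])
y = np.array([[1, 0], [1, 1], [0, 1], [0, 0]])  # one column per label
# Binary Relevance fits one single-class problem per label column.
clf = BinaryRelevance(classifier=GaussianNB(), require_dense=[True, True])
clf.fit(X, y)
print(clf.predict(X).toarray())  # predictions are returned as a sparse matrix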
fb80e583de6a468b2474efa1dcfc20d1531d5aeb
| 49,050
|
py
|
Python
|
src/data_processor/data_processor.py
|
krezac/tesla-race-analyzer
|
d86c18f335abea3801335dd8bcd58b9b35b487fe
|
[
"MIT"
] | 1
|
2021-01-27T23:26:49.000Z
|
2021-01-27T23:26:49.000Z
|
src/data_processor/data_processor.py
|
krezac/tesla-race-analyzer
|
d86c18f335abea3801335dd8bcd58b9b35b487fe
|
[
"MIT"
] | 1
|
2021-01-05T10:58:15.000Z
|
2021-01-05T10:58:15.000Z
|
src/data_processor/data_processor.py
|
krezac/tesla-race-analyzer
|
d86c18f335abea3801335dd8bcd58b9b35b487fe
|
[
"MIT"
] | null | null | null |
import pendulum
from typing import Dict, Any, List, Optional, Callable
from pydantic import BaseModel
import src.data_source.teslamate
from src.data_processor.labels import generate_labels
from src.data_models import Configuration, JsonLabelItem, JsonLabelGroup, JsonStatusResponse, JsonLapsResponse, JsonStaticSnapshot, JsonResponseListWrapper
from src.utils import function_timer
from src.enums import LabelFormatGroupEnum, CalculatedFieldScopeEnum
import src.data_processor.calculated_fields_positions
import src.data_processor.calculated_fields_laps
import src.data_processor.calculated_fields_forecast
from src.data_processor import lap_analyzer
import logging
logger = logging.getLogger(__name__)
class DataProcessor(BaseModel):
""" just wrapper around cached data"""
initial_status_raw: Optional[Dict[str, Any]]
current_status_raw: Optional[Dict[str, Any]]
current_status_formatted: Optional[JsonStatusResponse]
car_positions_raw: Optional[List[Dict[str, Any]]]
lap_list_raw: Optional[List[Dict[str, Any]]]
lap_list_formatted: Optional[JsonLapsResponse]
total_raw: Optional[Dict[str, Any]]
total_formatted: Optional[JsonLabelGroup]
charging_process_list_raw: Optional[List[Dict[str, Any]]]
charging_process_list_formatted: Optional[JsonResponseListWrapper]
forecast_raw: Optional[Dict[str, Any]]
forecast_formatted: Optional[JsonLabelGroup]
################
# data loaders #
################
@function_timer()
def _update_initial_status(self, car_id: int, start_time: pendulum.DateTime):
"""
Load initial status
Not meant to be called from outside
:param car_id: car id to load status for.
:param start_time: time to retrieve the status for.
:return: retrieved data
"""
return src.data_source.teslamate.get_car_status(car_id, start_time)
def _set_driver_change(self, record, dt: pendulum.DateTime):
from src.db_models import DriverChange
driver_change = DriverChange.query.filter(
(DriverChange.valid_from <= dt) & ((DriverChange.valid_to >= dt) | (DriverChange.valid_to == None))
).order_by(DriverChange.valid_from.desc()).first()
record['driver_name'] = driver_change.driver if driver_change else None
record['copilot_name'] = driver_change.copilot if driver_change else None
@function_timer()
def _load_status_raw(self, car_id: int, dt: pendulum.DateTime, *,
initial_status, _current_status=None, position_list, lap_list,
total, charging_process_list, forecast,
configuration: Configuration):
status = src.data_source.teslamate.get_car_status(car_id, dt)
# add driver change
self._set_driver_change(status, dt)
# update status by calculated fields
return self._enhance_status(status, dt,
initial_status=initial_status,
_current_status=status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
)
@function_timer()
def _load_positions(self, car_id: int, dt_start: pendulum.DateTime, dt_end: pendulum.DateTime, *,
initial_status, current_status, _position_list=None, lap_list,
total, charging_process_list, forecast,
configuration: Configuration) \
-> List[Dict[str, Any]]:
positions = src.data_source.teslamate.get_car_positions(car_id, dt_start, dt_end)
return self._enhance_positions(positions, dt_end,
initial_status=initial_status,
current_status=current_status,
_position_list=positions,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
)
@function_timer()
def _load_laps(self, positions, dt: pendulum.DateTime, *,
initial_status, current_status, position_list, _lap_list=None,
total, charging_process_list, forecast,
configuration: Configuration):
# TODO convert to finder
laps = lap_analyzer.find_laps(configuration, positions, configuration.start_radius, 0, 0)
for lap in laps:
if 'lap_data' in lap and lap['lap_data']:
self._set_driver_change(lap, lap['lap_data'][0]['date'])
laps = self._enhance_laps(laps, dt,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
_lap_list=laps,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
)
return laps
@function_timer()
def _load_total(self, dt: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
_total=None, charging_process_list, forecast,
configuration: Configuration):
# TODO convert to finder
total = {}
total = self._enhance_total(total, dt,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
_total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
)
return total
@function_timer()
def _load_charging_processes(self, car_id: int, dt_start: pendulum.DateTime, dt_end: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
total, _charging_process_list=None, forecast,
configuration: Configuration) \
-> List[Dict[str, Any]]:
charging_processes = src.data_source.teslamate.get_car_charging_processes(car_id, dt_start, dt_end)
return self._enhance_charging_processes(charging_processes, dt_end,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
_charging_process_list=charging_processes,
forecast=forecast,
configuration=configuration,
)
@function_timer()
def _load_forecast(self, dt: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
total, charging_process_list, _forecast=None,
configuration: Configuration):
# TODO convert to finder
forecast = {}
# note that forecast doesn't care about all laps. So let's restrict it
exclude_first_laps = configuration.forecast_exclude_first_laps
use_last_laps = configuration.forecast_use_last_laps
lap_list_for_forecast = []
car_laps = [lap for lap in lap_list if lap['finished']]  # only finished laps matter here
logger.debug(f"{len(car_laps)} finished laps to analyze")
if not lap_list or len(car_laps) < (use_last_laps + exclude_first_laps):
logger.info(
f"not enough laps ({len(car_laps)}/{len(lap_list)}) from {use_last_laps} + {exclude_first_laps} needed)")
else:
lap_list_for_forecast = lap_list[-use_last_laps:]
if not lap_list[-1]['finished']: # include the unfinished lap as it affects calculations
lap_list_for_forecast.append(lap_list[-1])
forecast = self._enhance_forecast(total, dt,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list_for_forecast, # note the limited list here
total=total,
charging_process_list=charging_process_list,
_forecast=forecast,
configuration=configuration,
)
return forecast
#####################################
# enhancers - add calculated fields #
#####################################
@classmethod
def _add_user_defined_calculated_field(cls, field_description, current_item: Dict[str, Any], *,
initial_status, current_status, position_list, lap_list,
total, charging_process_list, forecast,
configuration: Configuration,
current_item_index: Optional[int], now_dt: pendulum.DateTime):
"""
Add database defined calculated fields to current_item (helper, not to be called directly)
:param field_description: an instance of CalculatedField (the type is not imported here to avoid a DB dependency)
:param current_item:
:param initial_status:
:param current_status:
:param position_list:
:param lap_list:
:param total:
:param charging_process_list:
:param forecast:
:param configuration:
:param current_item_index:
:param now_dt:
:return:
"""
name = field_description.name
code = field_description.calc_fn
value = eval(code, {}, {
'current_item': current_item,
'initial_status': initial_status,
'current_status': current_status,
'position_list': position_list,
'lap_list': lap_list,
'total': total,
'charging_process_list': charging_process_list,
'forecast': forecast,
'configuration': configuration,
'current_item_index': current_item_index,
'now_dt': now_dt
}) # calculate new value
current_item[name] = value
@function_timer()
def _enhance_status(self, status: Dict[str, Any], dt: pendulum.DateTime, *,
initial_status, _current_status=None, position_list, lap_list,
total, charging_process_list, forecast,
configuration: Configuration) -> Dict[str, Any]:
"""
Add calculated fields to the status
:param status: data to enhance
:return: the enhanced version (note: the enhancement is done in place, so the parameter itself is modified)
"""
# add hardcoded calculated fields
from src.data_processor.calculated_fields_status import add_calculated_fields
add_calculated_fields(current_item=status,
initial_status=initial_status,
current_status=status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
# add user-defined (db) calculated fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.STATUS.value)
for db_calculated_field in db_calculated_fields:
self._add_user_defined_calculated_field(db_calculated_field, status,
initial_status=initial_status,
current_status=status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
return status
@function_timer()
def _enhance_positions(self, positions: List[Dict[str, Any]], dt: pendulum.DateTime, *,
initial_status, current_status, _position_list=None, lap_list,
total, charging_process_list, forecast,
configuration: Configuration) -> List[Dict[str, Any]]:
# add calculated fields
# !! note this operation is expensive as it runs on a lot of records
from src.data_processor.calculated_fields_positions import add_calculated_fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.POSITION.value)
for i in range(len(positions)):
add_calculated_fields(current_item=positions[i],
initial_status=initial_status,
current_status=current_status,
position_list=positions,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt
)
for field_description in db_calculated_fields:
self._add_user_defined_calculated_field(field_description, positions[i],
initial_status=initial_status,
current_status=current_status,
position_list=positions,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt,
)
return positions
@function_timer()
def _enhance_laps(self, laps: List[Dict[str, Any]], dt: pendulum.DateTime, *,
initial_status, current_status, position_list, _lap_list=None,
total, charging_process_list, forecast,
configuration: Configuration) -> List[Dict[str, Any]]:
from src.data_processor.calculated_fields_laps import add_calculated_fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.POSITION.value)
for i in range(len(laps)):
add_calculated_fields(current_item=laps[i],
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=laps,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt
)
for field_description in db_calculated_fields:
self._add_user_defined_calculated_field(field_description, laps[i],
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=laps,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt,
)
return laps
@function_timer()
def _enhance_total(self, total: Dict[str, Any], dt: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
_total=None, charging_process_list, forecast,
configuration: Configuration) -> Dict[str, Any]:
"""
Add calculated fields for total
:param total: data to enhance
:return: the enhanced version (note: the enhancement is done in place, so the parameter itself is modified)
"""
# add hardcoded calculated fields
from src.data_processor.calculated_fields_total import add_calculated_fields
add_calculated_fields(current_item=total,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
# add user-defined (db) calculated fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.TOTAL.value)
for db_calculated_field in db_calculated_fields:
self._add_user_defined_calculated_field(db_calculated_field, total,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
return total
@function_timer()
def _enhance_charging_processes(self, charging_processes: List[Dict[str, Any]], dt: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
total, forecast, _charging_process_list,
configuration: Configuration) -> List[Dict[str, Any]]:
"""
Add calculated fields for charging processes
:param charging_processes: data to enhance
:return: the enhanced version (note: the enhancement is done in place, so the parameter itself is modified)
"""
from src.data_processor.calculated_fields_charges import add_calculated_fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.POSITION.value)
for i in range(len(charging_processes)):
add_calculated_fields(current_item=charging_processes[i],
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_processes,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt
)
for field_description in db_calculated_fields:
self._add_user_defined_calculated_field(field_description, charging_processes[i],
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_processes,
forecast=forecast,
configuration=configuration,
current_item_index=i,
now_dt=dt,
)
return charging_processes
@function_timer()
def _enhance_forecast(self, forecast: Dict[str, Any], dt: pendulum.DateTime, *,
initial_status, current_status, position_list, lap_list,
total, charging_process_list, _forecast=None,
configuration: Configuration) -> Dict[str, Any]:
"""
Add calculated fields for forecast
:param forecast:
:return: the enhanced version (note: the enhancement is done in place, so the parameter itself is modified)
"""
# add hardcoded calculated fields
from src.data_processor.calculated_fields_forecast import add_calculated_fields
add_calculated_fields(current_item=forecast,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
# add user-defined (db) calculated fields
from src.db_models import CalculatedField
db_calculated_fields = CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.FORECAST.value)
for db_calculated_field in db_calculated_fields:
self._add_user_defined_calculated_field(db_calculated_field, forecast,
initial_status=initial_status,
current_status=current_status,
position_list=position_list,
lap_list=lap_list,
total=total,
charging_process_list=charging_process_list,
forecast=forecast,
configuration=configuration,
current_item_index=None,
now_dt=dt
)
return forecast
##############
# formatters #
##############
@classmethod
def _format_dict(cls, d: Dict[str, Any], label_group: LabelFormatGroupEnum,
dt: Optional[pendulum.DateTime], *, record_id: Optional[str] = None) -> JsonLabelGroup:
"""
Generic function to format dict into group of labels
:param d: data to be formatted
:param label_group: group of labels/title
:param dt:
:param record_id: if provided, it is passed on to the UI, e.g. for table headers
:return: formatted structure
"""
from src.db_models import LabelGroup, LabelFormat
from src.data_processor.labels import generate_labels
db_label_group: LabelGroup = src.db_models.LabelGroup.query.filter_by(code=label_group.value).first()
formatted_items: List[JsonLabelItem] = generate_labels(LabelFormat.get_all_by_group(label_group.value),
d, dt)
return JsonLabelGroup(title=db_label_group.title, items=formatted_items, record_id=record_id)
def _load_status_formatted(self, status: Dict[str, Any], total: Dict[str, Any], forecast: Dict[str, Any],
dt: Optional[pendulum.DateTime]) -> JsonStatusResponse:
return JsonStatusResponse(
lat=status['latitude'] if 'latitude' in status else 0,
lon=status['longitude'] if 'longitude' in status else 0,
mapLabels=self._format_dict(status, LabelFormatGroupEnum.MAP, dt),
statusLabels=self._format_dict(status, LabelFormatGroupEnum.STATUS, dt),
totalLabels=self._format_dict(total, LabelFormatGroupEnum.TOTAL, dt), # TODO partial hack
forecastLabels=self._format_dict(forecast, LabelFormatGroupEnum.FORECAST, dt),
)
def _load_laps_formatted(self, laps: List[Dict[str, Any]], dt: Optional[pendulum.DateTime]) -> JsonLapsResponse:
from src import configuration
recent_lap = laps[-1] if laps else None
prev_lap_list = laps[-configuration.show_previous_laps - 1:-1] if len(laps) > 0 else []
formatted_prev_laps = [self._format_dict(lap, LabelFormatGroupEnum.PREVIOUS_LAPS, dt,
record_id=str(lap['lap_id'])) for lap in prev_lap_list]
if configuration.previous_laps_table_reversed:
formatted_prev_laps.reverse()
formatted_recent_lap = self._format_dict(recent_lap, LabelFormatGroupEnum.RECENT_LAP, dt,
record_id=str(recent_lap['lap_id'])) if recent_lap else None
return JsonLapsResponse(
previous=JsonResponseListWrapper(__root__=formatted_prev_laps),
recent=formatted_recent_lap
)
def _load_charging_process_list_formatted(self, charging_process_list: List[Dict[str, Any]],
dt: Optional[pendulum.DateTime]) -> JsonResponseListWrapper:
from src import configuration
formatted_list = [self._format_dict(chp, LabelFormatGroupEnum.CHARGING, dt, record_id=str(rec_id))
for rec_id, chp in enumerate(charging_process_list, start=1)] # simulate id using index
if configuration.charging_table_reversed:
formatted_list.reverse()
return JsonResponseListWrapper(
__root__=formatted_list
)
#################################################
# update calls (to be used from background jobs #
#################################################
@function_timer()
def update_status(self):
"""
Update the current status. May be called from a background job.
"""
from src import configuration
car_id = configuration.car_id
now = pendulum.now(tz='utc')
if not self.initial_status_raw:
self.initial_status_raw = self._update_initial_status(car_id, configuration.start_time) # make sure there is initial status loaded
status = self._load_status_raw(car_id, now,
initial_status=self.initial_status_raw,
_current_status=self.current_status_raw,
position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
total=self.total_raw,
charging_process_list=self.charging_process_list_raw,
forecast=self.forecast_raw,
configuration=configuration, )
self.current_status_raw = status
# just to make sure all data exist before rendering if no bg jobs are allowed
if not configuration or not self.total_raw or not self.forecast_raw:
self.update_positions_laps_forecast()
self.current_status_formatted = self._load_status_formatted(self.current_status_raw, self.total_raw,
self.forecast_raw, now)
@function_timer()
def update_positions_laps_forecast(self):
"""
Update the rest of the data (everything besides status). May be called from a background job.
"""
from src import configuration
now = pendulum.now(tz='utc')
dt_end = configuration.start_time.add(hours=configuration.hours)
positions = self._load_positions(
configuration.car_id, configuration.start_time, dt_end,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
_position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
total=self.total_raw,
charging_process_list=self.charging_process_list_raw,
forecast=self.forecast_raw,
configuration=configuration, )
self.car_positions_raw = positions
# no formatting for positions
# find and update laps
self.lap_list_raw = self._load_laps(
positions, now,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
position_list=self.car_positions_raw,
_lap_list=self.lap_list_raw,
total=self.total_raw,
charging_process_list=self.charging_process_list_raw,
forecast=self.forecast_raw,
configuration=configuration,
)
# load charging
self.charging_process_list_raw = self._load_charging_processes(
configuration.car_id, configuration.start_time, dt_end,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
total=self.total_raw,
_charging_process_list=self.charging_process_list_raw,
forecast=self.forecast_raw,
configuration=configuration,
)
# load total
self.total_raw = self._load_total(
now,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
_total=self.total_raw,
charging_process_list=self.charging_process_list_raw,
forecast=self.forecast_raw,
configuration=configuration,
)
# load forecast
self.forecast_raw = self._load_forecast(
now,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
total=self.total_raw,
charging_process_list=self.charging_process_list_raw,
_forecast=self.forecast_raw,
configuration=configuration,
)
# just to make sure all data exist before rendering if no bg jobs are allowed
if not configuration or not self.current_status_raw:
self.update_status()
# generate the formatted form after, when all are updated
self.lap_list_formatted = self._load_laps_formatted(self.lap_list_raw, now)
self.charging_process_list_formatted = \
self._load_charging_process_list_formatted(self.charging_process_list_raw, now)
self.total_formatted = self._format_dict(self.total_raw, LabelFormatGroupEnum.TOTAL, now)
self.forecast_formatted = self._format_dict(self.forecast_raw, LabelFormatGroupEnum.FORECAST, now)
###########
# getters #
###########
def get_status_raw(self) -> Dict[str, Any]:
"""
get current status raw
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.current_status_raw:
self.update_status()
return self.current_status_raw
def get_status_formatted(self) -> JsonStatusResponse:
"""
get current status formatted for the UI
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.current_status_formatted:
self.update_status()
out = self.current_status_formatted
out.totalLabels = self.total_formatted  # TODO this is not a nice hack
return out
def get_positions_raw(self) -> List[Dict[str, Any]]:
"""
get current positions raw
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.car_positions_raw:
self.update_positions_laps_forecast()
return self.car_positions_raw
def get_laps_raw(self) -> List[Dict[str, Any]]:
"""
get laps raw form
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.lap_list_raw:
self.update_positions_laps_forecast()
return self.lap_list_raw
def get_laps_formatted(self) -> JsonLapsResponse:
"""
get laps formatted for UI
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.lap_list_formatted:
self.update_positions_laps_forecast()
return self.lap_list_formatted
def get_total_raw(self) -> Dict[str, Any]:
"""
get total raw
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.total_raw:
self.update_positions_laps_forecast()
return self.total_raw
def get_total_formatted(self) -> JsonLabelGroup:
"""
get total formatted
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.total_formatted:
self.update_positions_laps_forecast()
return self.total_formatted
def get_charging_process_list_raw(self) -> List[Dict[str, Any]]:
"""
get charging processes raw
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.charging_process_list_raw:
self.update_positions_laps_forecast()
return self.charging_process_list_raw
def get_charging_process_list_formatted(self) -> JsonResponseListWrapper:
"""
get charging processes formatted
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.charging_process_list_formatted:
self.update_positions_laps_forecast()
return self.charging_process_list_formatted
def get_forecast_raw(self) -> Dict[str, Any]:
"""
get forecast raw
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.forecast_raw:
self.update_positions_laps_forecast()
return self.forecast_raw
def get_forecast_formatted(self) -> JsonLabelGroup:
"""
get forecast formatted
:return: retrieved data
"""
from src import configuration
if not configuration.update_run_background or not self.forecast_formatted:
self.update_positions_laps_forecast()
return self.forecast_formatted
# TODO add charging in better way
def get_car_chargings(self, lap_id: int):
from src import configuration
if not configuration.update_run_background or not self.lap_list_raw:
self.update_positions_laps_forecast()
lap = self.lap_list_raw[lap_id]
pit_start = lap['pit_start_time']
pit_end = lap['pit_end_time']
return src.data_source.teslamate.get_car_chargings(configuration.car_id, pit_start, pit_end)
################################
# static snapshot for datetime #
################################
@function_timer()
def get_static_snapshot(self, dt_end: pendulum.DateTime) -> JsonStaticSnapshot:
"""
Get system snapshot for specific date and time
:param dt_end:
:return:
"""
from src import configuration
snapshot = JsonStaticSnapshot()
snapshot.initial_status_raw = self._update_initial_status(configuration.car_id, configuration.start_time)
for i in range(2): # repeat twice in case there are dependent fields
snapshot.current_status_raw = self._load_status_raw(
configuration.car_id, dt_end,
initial_status=snapshot.initial_status_raw,
_current_status=snapshot.current_status_raw,
position_list=snapshot.car_positions_raw,
lap_list=snapshot.lap_list_raw,
total=snapshot.total_raw,
charging_process_list=snapshot.charging_process_list_raw,
forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.car_positions_raw = self._load_positions(
configuration.car_id, configuration.start_time, dt_end,
initial_status=snapshot.initial_status_raw,
current_status=snapshot.current_status_raw,
_position_list=snapshot.car_positions_raw,
lap_list=snapshot.lap_list_raw,
total=snapshot.total_raw,
charging_process_list=snapshot.charging_process_list_raw,
forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.lap_list_raw = self._load_laps(
snapshot.car_positions_raw, dt_end,
initial_status=snapshot.initial_status_raw,
current_status=snapshot.current_status_raw,
position_list=snapshot.car_positions_raw,
_lap_list=snapshot.lap_list_raw,
total=snapshot.total_raw,
charging_process_list=snapshot.charging_process_list_raw,
forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.charging_process_list_raw = self._load_charging_processes(
configuration.car_id, configuration.start_time, dt_end,
initial_status=snapshot.initial_status_raw,
current_status=snapshot.current_status_raw,
position_list=snapshot.car_positions_raw,
lap_list=snapshot.lap_list_raw,
total=snapshot.total_raw,
_charging_process_list=snapshot.charging_process_list_raw,
forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.total_raw = self._load_total(
dt_end,
initial_status=snapshot.initial_status_raw,
current_status=snapshot.current_status_raw,
position_list=snapshot.car_positions_raw,
lap_list=snapshot.lap_list_raw,
_total=snapshot.total_raw,
charging_process_list=snapshot.charging_process_list_raw,
forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.forecast_raw = self._load_forecast(
dt_end,
initial_status=snapshot.initial_status_raw,
current_status=snapshot.current_status_raw,
position_list=snapshot.car_positions_raw,
lap_list=snapshot.lap_list_raw,
total=snapshot.total_raw,
charging_process_list=snapshot.charging_process_list_raw,
_forecast=snapshot.forecast_raw,
configuration=configuration,
)
snapshot.current_status_formatted = self._load_status_formatted(snapshot.current_status_raw,
snapshot.total_raw,
snapshot.forecast_raw, dt_end)
snapshot.lap_list_formatted = self._load_laps_formatted(snapshot.lap_list_raw, dt_end)
snapshot.charging_process_list_formatted = self._load_charging_process_list_formatted(
snapshot.charging_process_list_raw, dt_end)
snapshot.total_formatted = self._format_dict(snapshot.total_raw, LabelFormatGroupEnum.TOTAL, dt_end)
snapshot.current_status_formatted.totalLabels = snapshot.total_formatted # to pass it to appropriate spot
snapshot.forecast_formatted = self._format_dict(snapshot.forecast_raw, LabelFormatGroupEnum.FORECAST, dt_end)
return snapshot
####################
# other UI helpers #
####################
# Not needed any more
# def describe_status_fields(self) -> List[DatabaseFieldDescription]:
# from src.db_models import LabelFormat, CalculatedField
#
# # TODO for the development time, update on every try if not _current_status_raw:
# self.update_status()
#
# out = FieldDescriptionList(items=[])
#
# database_raw_fields = get_database_fields_status()
# hardcoded_calculated_fields = {cf.name: cf for cf in
# src.data_processor.calculated_fields_status.get_calculated_fields_status()}
# database_calculated_fields = {cf.name: cf for cf in
# CalculatedField.get_all_by_scope(CalculatedFieldScopeEnum.STATUS.value)}
#
# # remember the order (custom, hardcoded, db) as the names may be overridden
# for key in self._current_status_raw:
# if key in hardcoded_calculated_fields:
# cf = hardcoded_calculated_fields[key]
# out.items.append(DatabaseFieldDescription(
# name=cf.name, description=cf.description, return_type=cf.return_type))
# elif key in database_raw_fields:
# out.items.append(database_raw_fields[key])
# elif key in database_calculated_fields:
# cf = database_calculated_fields[key]
# out.items.append(DatabaseFieldDescription(
# name=cf.name, description=cf.description, return_type=cf.return_type))
# else:
# # fallback
# out.items.append(DatabaseFieldDescription(name=key, description="__fallback__"))
#
# return out
def test_custom_calculated_field(self, field_name: str, scope_code: str, function_code: str, return_type: str) \
-> Optional[Any]:
from src.db_models import CalculatedField
from src import configuration # imports global configuration
if not configuration.update_run_background or not self.current_status_raw:
self.update_status()
field = CalculatedField(
id=-1, name=field_name, description="", return_type=return_type, calc_fn=function_code, scope_id=1 # TODO
)
current_item = {}
# TODO generate current item and index based on the scope
self._add_user_defined_calculated_field(field, current_item,
initial_status=self.initial_status_raw,
current_status=self.current_status_raw,
position_list=self.car_positions_raw,
lap_list=self.lap_list_raw,
charging_process_list=self.charging_process_list_raw,
total=self.total_raw,
forecast=self.forecast_raw,
current_item_index=None,
configuration=configuration,
now_dt=pendulum.now(tz='utc')
)
return current_item
def test_custom_label_format(self, group_code: str, field: str, label: str, format_fn: str, format_str: str,
unit: str, default: str) -> List[JsonLabelItem]:
from src.db_models import LabelFormat
from src import configuration # imports global configuration
if not configuration.update_run_background or not self.current_status_raw:
self.update_status()
label_format = LabelFormat(field=field, label=label, format_function=format_fn, format=format_str, unit=unit,
default=default, group_id=1) # TODO
# TODO generate current item and index based on the scope
formatted_items = generate_labels([label_format],
self.current_status_raw, pendulum.now(tz='utc'))
return formatted_items
# let's have just one singleton to be used
data_processor = DataProcessor()
| 49.898271
| 155
| 0.556126
|
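To illustrate the eval-based mechanism of _add_user_defined_calculated_field above: the calc_fn string stored in the database is evaluated as a Python expression against the context names the method passes in. The field name, the expression, and the initial_status contents below are hypothetical; only the context keys mirror the code above.
import pendulum
# Hypothetical user-defined field "elapsed_hours": the expression string is
# eval'd exactly the way _add_user_defined_calculated_field does it.
calc_fn = "(now_dt - initial_status['date']).in_hours() if initial_status else None"
context = {
    'current_item': {},
    'initial_status': {'date': pendulum.now(tz='utc').subtract(hours=3)},  # made-up key and value
    'current_status': None,
    'position_list': None,
    'lap_list': None,
    'total': None,
    'charging_process_list': None,
    'forecast': None,
    'configuration': None,
    'current_item_index': None,
    'now_dt': pendulum.now(tz='utc'),
}
current_item = context['current_item']
current_item['elapsed_hours'] = eval(calc_fn, {}, context)
print(current_item)  # {'elapsed_hours': 3}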
65a88d1023787306aa9049cdc3bbaba633827c47
| 242
|
py
|
Python
|
CodeForces/Round555Div3/A.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 1
|
2018-11-25T04:15:45.000Z
|
2018-11-25T04:15:45.000Z
|
CodeForces/Round555Div3/A.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | null | null | null |
CodeForces/Round555Div3/A.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 2
|
2018-08-08T13:01:14.000Z
|
2018-11-25T12:38:36.000Z
|
N = int(input())
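# f(x): add one, then strip trailing zeros; starting from N we repeatedly apply f
# and count how many distinct values appear before the sequence starts repeating.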
def f(x):
x += 1
x = str(x)
while x[-1] == "0":
x = x[:-1]
return x
prev = set()
prev.add(N)
n = N
while int(f(n)) not in prev:
tmp = int(f(n))
prev.add(tmp)
n = tmp
print(len(prev))
| 11
| 28
| 0.450413
|
79be4714235b94387059ea08d71c56821b5d4dba
| 397
|
py
|
Python
|
back_front/back_front/wsgi.py
|
LaraEvdokimova/Aviato
|
93084b70f5fae86997228878fd3b093b588d6d46
|
[
"Apache-2.0"
] | null | null | null |
back_front/back_front/wsgi.py
|
LaraEvdokimova/Aviato
|
93084b70f5fae86997228878fd3b093b588d6d46
|
[
"Apache-2.0"
] | null | null | null |
back_front/back_front/wsgi.py
|
LaraEvdokimova/Aviato
|
93084b70f5fae86997228878fd3b093b588d6d46
|
[
"Apache-2.0"
] | 1
|
2020-06-06T15:54:05.000Z
|
2020-06-06T15:54:05.000Z
|
"""
WSGI config for back_front project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'back_front.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
eb9ebff865dc0c09c66751b6b918907cbc7e008d
| 2,244
|
py
|
Python
|
tokens/views.py
|
salimsuleiman/plt
|
b0a4fdbb5e765518e44440f12ef306a7aa60c382
|
[
"MIT"
] | 2
|
2022-03-25T09:22:54.000Z
|
2022-03-30T19:57:42.000Z
|
tokens/views.py
|
salimsuleiman/plt
|
b0a4fdbb5e765518e44440f12ef306a7aa60c382
|
[
"MIT"
] | null | null | null |
tokens/views.py
|
salimsuleiman/plt
|
b0a4fdbb5e765518e44440f12ef306a7aa60c382
|
[
"MIT"
] | 1
|
2022-03-30T19:58:54.000Z
|
2022-03-30T19:58:54.000Z
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Token, TokenBlackList
import jwt
from rest_framework import status
from .serializers import TokenSerializer
from wallets.serializers import WalletSerializers
from users.serializers import UserSerializer
from transactions.serializers import TransactionSerializer
from .validations.token_validator import validate_tokens_input
from transactions.models import Transaction
@api_view(['PUT'])
@validate_tokens_input
def refreshToken(request, key, refresh_token):
token = Token.objects.filter(key=key).first()
if token is None:
return Response({'error':'Invalid Authentication Key'}, status.HTTP_404_NOT_FOUND)
try:
try:
refresh_decoded_token = jwt.decode(refresh_token, "secret", algorithms=["HS256"])
except jwt.exceptions.DecodeError:
return Response({'error':'cannot decode. failed validation'}, status.HTTP_400_BAD_REQUEST)
except jwt.exceptions.InvalidSignatureError:
return Response({'error':'JWT Invalid Signature'}, status.HTTP_400_BAD_REQUEST)
if not token.check_token_hash(refresh_decoded_token['keyHash']):
return Response({'error': 'R-token is invalid'}, status.HTTP_400_BAD_REQUEST)
TokenBlackList.objects.create(token=token.refresh_token)
token.refresh_t()
TSerializer = TokenSerializer(token, many=False)
WSerializer = WalletSerializers(token.user.wallet, many=False)
USerializer = UserSerializer(token.user, many=False)
Tx1Serializer = TransactionSerializer(Transaction.objects.filter(sender=token.user.wallet.receive_key), many=True)
Tx2Serializer = TransactionSerializer(Transaction.objects.filter(receiver=token.user.wallet.receive_key), many=True)
return Response({'token': TSerializer.data,
'wallet': {
'owner': USerializer.data,
'instance': WSerializer.data,
'transactions': {
'sended': Tx1Serializer.data,
'received': Tx2Serializer.data,
}
}}, status.HTTP_202_ACCEPTED)
| 44.88
| 120
| 0.697415
|
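The refreshToken view above expects the refresh token to be an HS256 JWT whose keyHash claim matches the stored token hash. Below is a minimal pyjwt sketch of that encode/decode round-trip, using the same hard-coded "secret" as the view and a made-up keyHash value; in the app the value must satisfy token.check_token_hash().
import jwt
payload = {"keyHash": "example-hash-value"}  # made-up; must match the stored hash in the app
refresh_token = jwt.encode(payload, "secret", algorithm="HS256")
decoded = jwt.decode(refresh_token, "secret", algorithms=["HS256"])
assert decoded["keyHash"] == payload["keyHash"]
# A token signed with a different key is rejected, as in the view:
try:
    jwt.decode(refresh_token, "wrong-secret", algorithms=["HS256"])
except jwt.exceptions.InvalidSignatureError:
    print("rejected: JWT Invalid Signature")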
fd9d2340fc4a7fd50aee82e5af243013c65aa011
| 136
|
py
|
Python
|
dnsagent/__init__.py
|
account-login/dnsagent
|
c27235c5234d13a0f5d6e3b7735ff35d73d121d0
|
[
"MIT"
] | 2
|
2020-02-05T08:49:31.000Z
|
2020-03-19T19:04:46.000Z
|
dnsagent/__init__.py
|
account-login/dnsagent
|
c27235c5234d13a0f5d6e3b7735ff35d73d121d0
|
[
"MIT"
] | null | null | null |
dnsagent/__init__.py
|
account-login/dnsagent
|
c27235c5234d13a0f5d6e3b7735ff35d73d121d0
|
[
"MIT"
] | null | null | null |
"""
A configurable dns proxy powered by twisted.
"""
import logging
logger = logging.getLogger(__name__)
__version__ = '0.2.1.dev0'
| 12.363636
| 44
| 0.720588
|
8503993b399c99888607e58d0bb04a96efebd220
| 16,178
|
py
|
Python
|
Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
|
igordmn/blink-typo
|
4b25f34cb6580373beb900fd46886e05604fe6c3
|
[
"BSD-3-Clause"
] | null | null | null |
Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
|
igordmn/blink-typo
|
4b25f34cb6580373beb900fd46886e05604fe6c3
|
[
"BSD-3-Clause"
] | null | null | null |
Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
|
igordmn/blink-typo
|
4b25f34cb6580373beb900fd46886e05604fe6c3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest
from webkitpy.common.host import Host
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import _W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
DUMMY_PATH = 'dummy/testharness/path'
class W3CTestConverterTest(unittest.TestCase):
# FIXME: When we move to using a MockHost, this method should be removed, since
# then we can just pass in a dummy dir path
def fake_dir_path(self, dirname):
filesystem = Host().filesystem
webkit_root = WebKitFinder(filesystem).webkit_base()
return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
def test_read_prefixed_property_list(self):
""" Tests that the current list of properties requiring the -webkit- prefix load correctly """
# FIXME: We should be passing in a MockHost here ...
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
prop_list = converter.prefixed_properties
self.assertTrue(prop_list, 'No prefixed properties found')
def test_convert_for_webkit_nothing_to_convert(self):
""" Tests convert_for_webkit() using a basic test that has nothing to convert """
test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_no_conversion_happened(converted, test_html)
def test_convert_for_webkit_harness_only(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path("harnessonly")
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
converter.feed(test_html)
converter.close()
converted = converter.output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, [])
def test_convert_for_webkit_properties_only(self):
""" Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_for_webkit_harness_and_properties(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_test_harness_paths(self):
""" Tests convert_testharness_paths() with a test that uses multiple testharness files """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/resources/WebIDLParser.js"></script>
<script src="/resources/idlharness.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path('testharnesspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 4, 1)
def test_convert_vendor_prefix_js_paths(self):
test_html = """<head>
<script src="/common/vendor-prefix.js">
</head>
"""
fake_dir_path = self.fake_dir_path('adapterjspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
new_html = BeautifulSoup(converted[1])
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/common/vendor-prefix.js')
self.assertEquals(len(new_html.findAll(src=orig_path_pattern)), 0, 'vendor-prefix.js path was not converted')
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
new_relpath = os.path.relpath(resources_dir, fake_dir_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(new_html.findAll(src=relpath_pattern)), 1, 'vendor-prefix.js relative path not correct')
def test_convert_prefixed_properties(self):
""" Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
10 in one style block + 5 in another style
block + 5 inline styles, including one with multiple prefixed properties.
The properties in the test content are in all sorts of wack formatting.
"""
test_html = """<html>
<style type="text/css"><![CDATA[
.block1 {
width: 300px;
height: 300px
}
.block2 {
@test0@: propvalue;
}
.block3{@test1@: propvalue;}
.block4 { @test2@:propvalue; }
.block5{ @test3@ :propvalue; }
#block6 { @test4@ : propvalue; }
#block7
{
@test5@: propvalue;
}
#block8 { @test6@: propvalue; }
#block9:pseudo
{
@test7@: propvalue;
@test8@: propvalue propvalue propvalue;
}
]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[
.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}
]]></style>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_prefixed_properties(converted, test_content[0])
def test_hides_all_instructions_for_manual_testers(self):
test_html = """<body>
<h1 class="instructions">Hello manual tester!</h1>
<p class="instructions some_other_class">This is how you run this test.</p>
<p style="willbeoverwritten" class="instructions">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
expected_test_html = """<body>
<h1 class="instructions" style="display:none">Hello manual tester!</h1>
<p class="instructions some_other_class" style="display:none">This is how you run this test.</p>
<p class="instructions" style="display:none">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions" style="display:none">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.assertEqual(converted[1], expected_test_html)
def test_convert_attributes_if_needed(self):
""" Tests convert_attributes_if_needed() using a reference file that has some relative src paths """
test_html = """<html>
<head>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_reference_relative_paths(converted, test_reference_support_info)
def verify_conversion_happened(self, converted):
self.assertTrue(converted, "conversion didn't happen")
def verify_no_conversion_happened(self, converted, original):
self.assertEqual(converted[1], original, 'test should not have been converted')
def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
if isinstance(converted, basestring):
converted = BeautifulSoup(converted)
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/resources/testharness')
self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
new_relpath = os.path.relpath(resources_dir, test_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
def verify_prefixed_properties(self, converted, test_properties):
self.assertEqual(len(set(converted[0])), len(set(test_properties)), 'Incorrect number of properties converted')
for test_prop in test_properties:
self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
def verify_reference_relative_paths(self, converted, reference_support_info):
idx = 0
for path in reference_support_info['files']:
expected_path = re.sub(reference_support_info['reference_relpath'], '', path, 1)
element = reference_support_info['elements'][idx]
expected_tag = '<' + element + ' src=\"' + expected_path + '\">'
self.assertTrue(expected_tag in converted[1], 'relative path ' + path + ' was not converted correctly')
idx += 1
def generate_test_content(self, full_property_list, num_test_properties, html):
"""Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
test_properties = []
count = 0
while count < num_test_properties:
test_properties.append(full_property_list[count])
count += 1
# Replace the tokens in the testhtml with the test properties. Walk backward
# through the list to replace the double-digit tokens first
index = len(test_properties) - 1
while index >= 0:
# Use the unprefixed version
test_prop = test_properties[index].replace('-webkit-', '')
# Replace the token
html = html.replace('@test' + str(index) + '@', test_prop)
index -= 1
return (test_properties, html)
| 37.623256
| 198
| 0.682594
|
0df6348f6b637f5b31e1a8e22c7590dac5c47d31
| 3,967
|
py
|
Python
|
alipay/aop/api/request/AlipayPayCodecQrcodecacheAddRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/AlipayPayCodecQrcodecacheAddRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/AlipayPayCodecQrcodecacheAddRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayPayCodecQrcodecacheAddModel import AlipayPayCodecQrcodecacheAddModel
class AlipayPayCodecQrcodecacheAddRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayPayCodecQrcodecacheAddModel):
self._biz_content = value
else:
self._biz_content = AlipayPayCodecQrcodecacheAddModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.pay.codec.qrcodecache.add'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.358621 | 148 | 0.644316 |
bccab32b5543cb1fd57f6756f1a1383bff7a08cb | 424 | py | Python | configs/selfsup/_base_/datasets/imagenet100/mocov3_cnn_sz224_bs64.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | configs/selfsup/_base_/datasets/imagenet100/mocov3_cnn_sz224_bs64.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | configs/selfsup/_base_/datasets/imagenet100/mocov3_cnn_sz224_bs64.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | ["Apache-2.0"] | null | null | null |
_base_ = '../imagenet/mocov3_cnn_sz224_bs64.py'
# dataset settings
data_source_cfg = dict(type='ImageNet', return_label=False)
# ImageNet dataset, 100 class
data_train_list = 'data/meta/ImageNet100/train.txt'
data_train_root = 'data/ImageNet/train'
# dataset summary
data = dict(
train=dict(
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
))
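# Usage sketch (an assumption, not part of the repository): configs in this family are
# normally loaded with an mmcv-style Config, which resolves the _base_ inheritance chain
# declared above and exposes the merged fields as attributes.
from mmcv import Config
cfg = Config.fromfile('configs/selfsup/_base_/datasets/imagenet100/mocov3_cnn_sz224_bs64.py')
print(cfg.data.train.data_source.list_file)  # -> 'data/meta/ImageNet100/train.txt'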
| 26.5 | 60 | 0.709906 |
0ef33fe4c7deea448911316b631b051d5a7857c4 | 1,226 | py | Python | app/models/items.py | boconlonton/python-fastapi-tutorial | ff3d08730e948e7cc972ecd1d9a55e7cc75fe335 | ["MIT"] | null | null | null | app/models/items.py | boconlonton/python-fastapi-tutorial | ff3d08730e948e7cc972ecd1d9a55e7cc75fe335 | ["MIT"] | null | null | null | app/models/items.py | boconlonton/python-fastapi-tutorial | ff3d08730e948e7cc972ecd1d9a55e7cc75fe335 | ["MIT"] | null | null | null |
from typing import Optional
from typing import Set
from typing import List
from pydantic import BaseModel
from pydantic import Field
from pydantic import HttpUrl
class Image(BaseModel):
url: HttpUrl
name: str
class Item(BaseModel):
"""Contains the definition of the item"""
name: str
description: Optional[str] = Field(None,
title="The description of the item.",
max_length=300)
price: float = Field(...,
gt=0,
description="The price must be greater than zero.")
tax: Optional[float] = None
tags: Set[str] = []
images: Optional[List[Image]] = None
# Alternative way for creating example in document
# name: str = Field(..., example="Foo")
# description: Optional[str] = Field(None, example="A very nice Item")
# price: float = Field(..., example=35.4)
# tax: Optional[float] = Field(None, example=3.2)
class Config:
schema_extra = {
"example": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
}
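# Usage sketch (not part of this module): how the Item model above is typically consumed
# by a FastAPI path operation. The app instance and the '/items/' route are illustrative
# assumptions; request bodies are validated against Item, and the Config.schema_extra
# block above supplies the sample payload shown in the generated OpenAPI docs.
from fastapi import FastAPI
app = FastAPI()
@app.post("/items/")
async def create_item(item: Item):
    return item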
| 28.511628 | 76 | 0.541599 |
7da05ec453a26d3c4d3b1d509f059692c970fac9 | 10,656 | py | Python | src/datamodules/mri_datamodule.py | MohammedAljahdali/mental-disorder-brain-mri | 0b6a348e0d741f363e7806a8aacefd1ef59a5ee6 | ["MIT"] | null | null | null | src/datamodules/mri_datamodule.py | MohammedAljahdali/mental-disorder-brain-mri | 0b6a348e0d741f363e7806a8aacefd1ef59a5ee6 | ["MIT"] | null | null | null | src/datamodules/mri_datamodule.py | MohammedAljahdali/mental-disorder-brain-mri | 0b6a348e0d741f363e7806a8aacefd1ef59a5ee6 | ["MIT"] | null | null | null |
from typing import Optional, Tuple
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision.transforms import transforms
from src.datamodules.datasets.brian_scans_t1w import BrianScansT1w
from src.utils.utils import calculate_mean
from sklearn.model_selection import train_test_split
from src.utils import utils
import numpy as np
import torch
log = utils.get_logger(__name__)
class MRIDataModule(LightningDataModule):
"""
    Example of a LightningDataModule for the brain MRI (T1w) scans dataset; a usage sketch follows this module.
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
dataset_dir,
data_dir: str = "data/",
train_val_test_split: Tuple[int, int, int] = (0.7, 0.15, 0.15),
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
**kwargs,
):
super().__init__()
self.dataset_dir = dataset_dir
self.data_dir = data_dir
self.train_val_test_split = train_val_test_split
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.labels_counter = None
self.train_transforms = None
self.test_transforms = None
self.data_train: Optional[Dataset] = None
self.data_val: Optional[Dataset] = None
self.data_test: Optional[Dataset] = None
@property
def num_classes(self) -> int:
return 4
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
# BrianScansT1w(dataset_path=self.dataset_dir)
pass
def setup(self, stage: Optional[str] = None):
"""Load data. Set variables: self.data_train, self.data_val, self.data_test."""
dataset = BrianScansT1w(self.dataset_dir)
log.info(f"Calculating mean and std of the dataset")
# mean, std = calculate_mean(dataset, dataset.num_channels)
self.setup_transforms()
dataset = BrianScansT1w(self.dataset_dir, transform=self.test_transforms)
self.labels_counter = dataset.labels_counter
train_dataset_idx, val_dataset_idx = train_test_split(
np.arange(len(dataset.labels)),
train_size=0.6,
shuffle=True,
stratify=dataset.labels,
random_state=1
)
val_dataset_idx, test_dataset_idx = train_test_split(
val_dataset_idx,
train_size=0.5,
shuffle=True,
stratify=np.array(dataset.labels)[val_dataset_idx],
random_state=1
)
self.train_dataset = torch.utils.data.Subset(dataset, indices=train_dataset_idx)
self.val_dataset = torch.utils.data.Subset(dataset, indices=val_dataset_idx)
self.test_dataset = torch.utils.data.Subset(dataset, indices=test_dataset_idx)
print(f"Length of train is {len(self.train_dataset)}")
print(f"Length of val is {len(self.val_dataset)}")
print(f"Length of test is {len(self.test_dataset)}")
self.train_dataset.dataset.transform = self.train_transforms
def setup_transforms(self):
mean = [0.03555044, 0.03644094, 0.03768612, 0.03933044, 0.04138806, 0.04390845,
0.04686559, 0.05040561, 0.05488866, 0.0598497, 0.06535633, 0.07114838,
0.07741066, 0.08397495, 0.09015995, 0.09561275, 0.10028325, 0.10464429,
0.10884795, 0.11272568, 0.1161376, 0.11909771, 0.1223311, 0.12567622,
0.12900978, 0.13258312, 0.13632759, 0.14008128, 0.14384651, 0.14763705,
0.15057223, 0.15340333, 0.15568345, 0.1582634, 0.16053551, 0.1628206,
0.16502095, 0.16747784, 0.16994729, 0.17247222, 0.17502013, 0.17749757,
0.18003827, 0.18260108, 0.18498457, 0.18724774, 0.18951172, 0.19167611,
0.1937309, 0.1956074, 0.19740985, 0.19896994, 0.20041078, 0.20153163,
0.20255902, 0.20353528, 0.20445888, 0.20516288, 0.20576458, 0.20624929,
0.20666833, 0.20689151, 0.20727192, 0.20740478, 0.20758775, 0.20796604,
0.20835329, 0.20873604, 0.20921561, 0.20986974, 0.21050925, 0.2111183,
0.21174388, 0.21232274, 0.21283979, 0.21335694, 0.2137878, 0.21412231,
0.2144567, 0.21474577, 0.21493113, 0.21508262, 0.21541261, 0.215912,
0.21629999, 0.21669907, 0.21700149, 0.21684902, 0.21686486, 0.2172964,
0.21768935, 0.21822283, 0.21863202, 0.21888335, 0.21921261, 0.21952136,
0.21977312, 0.2198995, 0.21999373, 0.21971101, 0.21961603, 0.21914673,
0.21864955, 0.21820801, 0.21774024, 0.21724851, 0.21664351, 0.21629235,
0.21603627, 0.21577134, 0.21549865, 0.21532742, 0.21502935, 0.21478984,
0.21451452, 0.21409711, 0.21365262, 0.21315192, 0.2125571, 0.21187787,
0.21113619, 0.2102074, 0.20907159, 0.20776799, 0.20639179, 0.20474851,
0.20270969, 0.20069734, 0.1985597, 0.19620288, 0.19405762, 0.1918659,
0.18974683, 0.18762912, 0.18522535, 0.18316402, 0.18097264, 0.17867895,
0.17629378, 0.17363734, 0.17105392, 0.16822493, 0.16516284, 0.16186706,
0.15828171, 0.15451169, 0.15066763, 0.14670572, 0.14279159, 0.13876267,
0.13483779, 0.1311717, 0.12752733, 0.12386699, 0.12012876, 0.11637033,
0.11270375, 0.10875326, 0.10432421, 0.0990345, 0.0930741, 0.08673254,
0.08030588, 0.07383918, 0.06732377, 0.06130533, 0.05597395, 0.05127211,
0.0472601, 0.044025, 0.0412619, 0.03913275, 0.03758261, 0.03641299,
0.03561411, 0.03522878, ]
std = [0.06803005, 0.07072002, 0.07450564, 0.079443, 0.08571071, 0.09307344,
0.1010425, 0.11023663, 0.12135826, 0.13244933, 0.14425235, 0.15571473,
0.167254, 0.17845949, 0.18846179, 0.19648997, 0.2026066, 0.20810119,
0.21302142, 0.21713814, 0.22042945, 0.22316771, 0.22639252, 0.22939548,
0.23242819, 0.23559724, 0.23919037, 0.24283087, 0.24646596, 0.25012693,
0.25309173, 0.25575394, 0.2579583, 0.26033725, 0.26237134, 0.26442081,
0.26634439, 0.26843778, 0.27047218, 0.27246562, 0.27446751, 0.27630454,
0.2781014, 0.27986302, 0.28141551, 0.28283968, 0.28426665, 0.28561201,
0.28690088, 0.2881579, 0.28937461, 0.29030356, 0.29114825, 0.29168372,
0.29214104, 0.29259635, 0.29293159, 0.29310336, 0.29320688, 0.29319938,
0.29312642, 0.29286225, 0.29273992, 0.29236581, 0.29208713, 0.29193259,
0.29184538, 0.29170964, 0.29168197, 0.29195404, 0.29216955, 0.29241765,
0.2926225, 0.29273861, 0.29271646, 0.29263695, 0.29257242, 0.29238284,
0.29219282, 0.29189, 0.29148037, 0.29095512, 0.29059541, 0.29028949,
0.28966341, 0.28906776, 0.28847442, 0.28787701, 0.28777537, 0.28842476,
0.28931729, 0.29051657, 0.29148136, 0.29224929, 0.29311225, 0.29390509,
0.29464288, 0.29523965, 0.29577783, 0.29594713, 0.29623477, 0.2962341,
0.29615402, 0.29608266, 0.29598131, 0.29581731, 0.2955038, 0.29542323,
0.29540279, 0.29541899, 0.29547572, 0.29557613, 0.29565281, 0.29571751,
0.29575938, 0.29571197, 0.2956512, 0.29566676, 0.29555589, 0.29548786,
0.29536504, 0.29507944, 0.29461386, 0.29398311, 0.29336361, 0.29245323,
0.29120878, 0.28997606, 0.28864067, 0.28718624, 0.28584174, 0.28434571,
0.28283307, 0.28134204, 0.27959669, 0.27794219, 0.27620444, 0.27449423,
0.27257897, 0.270358, 0.26810549, 0.26562898, 0.26304082, 0.26012738,
0.25704775, 0.25382875, 0.25047722, 0.24689257, 0.24326167, 0.2395838,
0.23606098, 0.2328734, 0.2299421, 0.22699105, 0.22396253, 0.22079888,
0.21772, 0.21408227, 0.20911748, 0.20225117, 0.1937677, 0.18413421,
0.17367109, 0.16243242, 0.15000106, 0.13787893, 0.12607822, 0.11458374,
0.10421457, 0.09498407, 0.08646723, 0.07967396, 0.07455624, 0.07059492,
0.06807684, 0.0670008, ]
self.train_transforms = transforms.Compose([
transforms.Compose([
transforms.ToTensor(),
transforms.Pad((12, 12, 12, 12)),
transforms.RandomAffine(degrees=40, shear=(1,4), translate=(0.4, 0.4), scale=(0.5, 1.25)),
transforms.RandomPerspective(),
transforms.RandomHorizontalFlip(),
transforms.Normalize(mean=mean, std=std),
transforms.Resize((64, 64)),
transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1)),
# transforms.Lambda(lambda x: x.permute(1, 0, 2, 3))
])
])
self.test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
transforms.Resize((64, 64)),
transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1)),
transforms.Lambda(lambda x: x.permute(1, 0, 2, 3))
])
def train_dataloader(self):
return DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
dataset=self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
def test_dataloader(self):
return DataLoader(
dataset=self.test_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
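# Usage sketch (assumed, not from the repository): driving the datamodule above outside a
# Trainer. The dataset_dir value is a placeholder, and each batch is assumed to be an
# (images, labels) pair as produced by BrianScansT1w.
dm = MRIDataModule(dataset_dir='data/brain_scans', batch_size=8, num_workers=0)
dm.prepare_data()
dm.setup()
images, labels = next(iter(dm.train_dataloader()))
print(images.shape, labels.shape)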
| 50.985646 | 106 | 0.621997 |
b9e8ef1abcac155ebe5f904f3c274048ada2725d | 4,324 | py | Python | homeassistant/components/adax/config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/adax/config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/adax/config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z |
"""Config flow for Adax integration."""
from __future__ import annotations
import logging
from typing import Any
import adax
import adax_local
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_TOKEN,
CONF_UNIQUE_ID,
)
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
ACCOUNT_ID,
CLOUD,
CONNECTION_TYPE,
DOMAIN,
LOCAL,
WIFI_PSWD,
WIFI_SSID,
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Adax."""
VERSION = 2
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
data_schema = vol.Schema(
{
vol.Required(CONNECTION_TYPE, default=CLOUD): vol.In(
(
CLOUD,
LOCAL,
)
)
}
)
if user_input is None:
return self.async_show_form(
step_id="user",
data_schema=data_schema,
)
if user_input[CONNECTION_TYPE] == LOCAL:
return await self.async_step_local()
return await self.async_step_cloud()
async def async_step_local(self, user_input=None):
"""Handle the local step."""
data_schema = vol.Schema(
{vol.Required(WIFI_SSID): str, vol.Required(WIFI_PSWD): str}
)
if user_input is None:
return self.async_show_form(
step_id="local",
data_schema=data_schema,
)
wifi_ssid = user_input[WIFI_SSID].replace(" ", "")
wifi_pswd = user_input[WIFI_PSWD].replace(" ", "")
configurator = adax_local.AdaxConfig(wifi_ssid, wifi_pswd)
try:
device_configured = await configurator.configure_device()
except adax_local.HeaterNotAvailable:
return self.async_abort(reason="heater_not_available")
except adax_local.HeaterNotFound:
return self.async_abort(reason="heater_not_found")
except adax_local.InvalidWifiCred:
return self.async_abort(reason="invalid_auth")
if not device_configured:
return self.async_show_form(
step_id="local",
data_schema=data_schema,
errors={"base": "cannot_connect"},
)
unique_id = str(configurator.mac_id)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=unique_id,
data={
CONF_IP_ADDRESS: configurator.device_ip,
CONF_TOKEN: configurator.access_token,
CONF_UNIQUE_ID: unique_id,
CONNECTION_TYPE: LOCAL,
},
)
async def async_step_cloud(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the cloud step."""
data_schema = vol.Schema(
{vol.Required(ACCOUNT_ID): int, vol.Required(CONF_PASSWORD): str}
)
if user_input is None:
return self.async_show_form(step_id="cloud", data_schema=data_schema)
errors = {}
await self.async_set_unique_id(str(user_input[ACCOUNT_ID]))
self._abort_if_unique_id_configured()
account_id = user_input[ACCOUNT_ID]
password = user_input[CONF_PASSWORD].replace(" ", "")
token = await adax.get_adax_token(
async_get_clientsession(self.hass), account_id, password
)
if token is None:
_LOGGER.info("Adax: Failed to login to retrieve token")
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="cloud",
data_schema=data_schema,
errors=errors,
)
return self.async_create_entry(
title=str(user_input[ACCOUNT_ID]),
data={
ACCOUNT_ID: account_id,
CONF_PASSWORD: password,
CONNECTION_TYPE: CLOUD,
},
)
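# Illustration (not part of the integration): the voluptuous schemas built in the steps
# above validate and default the user input dict ('vol' is already imported at the top of
# this module). The literal choice strings here are placeholders standing in for the
# CLOUD/LOCAL constants imported from .const.
example_schema = vol.Schema(
    {vol.Required("connection_type", default="Cloud"): vol.In(("Cloud", "Local"))}
)
print(example_schema({}))                            # {'connection_type': 'Cloud'}
print(example_schema({"connection_type": "Local"}))  # {'connection_type': 'Local'}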
| 29.82069 | 81 | 0.593663 |
81c33fd117135c3d1d6a9bcebae6840224dc8e13 | 15,475 | py | Python | econml/tests/test_bootstrap.py | SpencerCompton/EconML | 35c5418618d4ca9828f5465c090dd17e5e9a263c | ["BSD-3-Clause"] | null | null | null | econml/tests/test_bootstrap.py | SpencerCompton/EconML | 35c5418618d4ca9828f5465c090dd17e5e9a263c | ["BSD-3-Clause"] | null | null | null | econml/tests/test_bootstrap.py | SpencerCompton/EconML | 35c5418618d4ca9828f5465c090dd17e5e9a263c | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from econml.bootstrap import BootstrapEstimator
from econml.inference import BootstrapInference
from econml.dml import LinearDML
from econml.ortho_iv import LinearIntentToTreatDRIV
from econml.two_stage_least_squares import NonparametricTwoStageLeastSquares
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import unittest
import joblib
class TestBootstrap(unittest.TestCase):
def test_with_sklearn(self):
"""Test that we can bootstrap sklearn estimators."""
for n_jobs in [None, -1]: # test parallelism
for kind in ['percentile', 'pivot', 'normal']: # test both percentile and pivot intervals
x = np.random.normal(size=(1000, 1))
y = x * 0.5 + np.random.normal(size=(1000, 1))
y = y.flatten()
est = LinearRegression()
est.fit(x, y)
bs = BootstrapEstimator(est, 50, n_jobs=n_jobs, bootstrap_type=kind)
# test that we can fit with the same arguments as the base estimator
bs.fit(x, y)
# test that we can get the same attribute for the bootstrap as the original, with the same shape
self.assertEqual(np.shape(est.coef_), np.shape(bs.coef_))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.coef__interval()
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.coef__interval(lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing with the results of a method, rather than an attribute
self.assertEqual(np.shape(est.predict(x)), np.shape(bs.predict(x)))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.predict_interval(x)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.predict(x)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.predict_interval(x, lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.predict(x)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
def test_with_econml(self):
"""Test that we can bootstrap econml estimators."""
x = np.random.normal(size=(1000, 2))
t = np.random.normal(size=(1000, 1))
t2 = np.random.normal(size=(1000, 1))
y = x[:, 0:1] * 0.5 + t + np.random.normal(size=(1000, 1))
est = LinearDML(model_y=LinearRegression(), model_t=LinearRegression())
est.fit(y, t, X=x)
bs = BootstrapEstimator(est, 50)
# test that we can fit with the same arguments as the base estimator
bs.fit(y, t, X=x)
# test that we can get the same attribute for the bootstrap as the original, with the same shape
self.assertEqual(np.shape(est.coef_), np.shape(bs.coef_))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.coef__interval()
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.coef__interval(lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that we can do the same thing with the results of a method, rather than an attribute
self.assertEqual(np.shape(est.effect(x, T0=t, T1=t2)), np.shape(bs.effect(x, T0=t, T1=t2)))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.effect_interval(x, T0=t, T1=t2)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.effect(x, T0=t, T1=t2)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.effect_interval(x, T0=t, T1=t2, lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.effect(x, T0=t, T1=t2)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
def test_backends(self):
"""Test that we can use threading or multiprocess backends."""
for backend in ['threading', 'loky']:
with joblib.parallel_backend(backend):
x = np.random.normal(size=(1000, 1))
y = x * 0.5 + np.random.normal(size=(1000, 1))
y = y.flatten()
est = LinearRegression()
est.fit(x, y)
bs = BootstrapEstimator(est, 50, n_jobs=2)
# test that we can fit with the same arguments as the base estimator
bs.fit(x, y)
# test that we can get the same attribute for the bootstrap as the original, with the same shape
self.assertEqual(np.shape(est.coef_), np.shape(bs.coef_))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.coef__interval()
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.coef__interval(lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.coef_), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing with the results of a method, rather than an attribute
self.assertEqual(np.shape(est.predict(x)), np.shape(bs.predict(x)))
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
lower, upper = bs.predict_interval(x)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.predict(x)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that we can do the same thing once we provide percentile bounds
lower, upper = bs.predict_interval(x, lower=10, upper=90)
for bound in [lower, upper]:
self.assertEqual(np.shape(est.predict(x)), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
def test_internal(self):
"""Test that the internal use of bootstrap within an estimator works."""
x = np.random.normal(size=(1000, 2))
t = np.random.normal(size=(1000, 1))
t2 = np.random.normal(size=(1000, 1))
y = x[:, 0:1] * 0.5 + t + np.random.normal(size=(1000, 1))
est = LinearDML(model_y=LinearRegression(), model_t=LinearRegression())
est.fit(y, t, X=x, inference='bootstrap')
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
eff = est.effect(x, T0=t, T1=t2)
lower, upper = est.effect_interval(x, T0=t, T1=t2)
for bound in [lower, upper]:
self.assertEqual(np.shape(eff), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that the estimated effect is usually within the bounds
assert np.mean(np.logical_and(lower <= eff, eff <= upper)) >= 0.9
# test that we can do the same thing once we provide alpha explicitly
lower, upper = est.effect_interval(x, T0=t, T1=t2, alpha=0.2)
for bound in [lower, upper]:
self.assertEqual(np.shape(eff), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# test that the estimated effect is usually within the bounds
assert np.mean(np.logical_and(lower <= eff, eff <= upper)) >= 0.8
def test_internal_options(self):
"""Test that the internal use of bootstrap within an estimator using custom options works."""
x = np.random.normal(size=(1000, 2))
z = np.random.normal(size=(1000, 1))
t = np.random.normal(size=(1000, 1))
t2 = np.random.normal(size=(1000, 1))
y = x[:, 0:1] * 0.5 + t + np.random.normal(size=(1000, 1))
opts = BootstrapInference(50, 2)
est = NonparametricTwoStageLeastSquares(t_featurizer=PolynomialFeatures(2),
x_featurizer=PolynomialFeatures(2),
z_featurizer=PolynomialFeatures(2),
dt_featurizer=None)
est.fit(y, t, X=x, W=None, Z=z, inference=opts)
# test that we can get an interval for the same attribute for the bootstrap as the original,
# with the same shape for the lower and upper bounds
eff = est.effect(x, T0=t, T1=t2)
lower, upper = est.effect_interval(x, T0=t, T1=t2)
for bound in [lower, upper]:
self.assertEqual(np.shape(eff), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# TODO: test that the estimated effect is usually within the bounds
# and that the true effect is also usually within the bounds
# test that we can do the same thing once we provide percentile bounds
lower, upper = est.effect_interval(x, T0=t, T1=t2, alpha=0.2)
for bound in [lower, upper]:
self.assertEqual(np.shape(eff), np.shape(bound))
# test that the lower and upper bounds differ
assert (lower <= upper).all()
assert (lower < upper).any()
# TODO: test that the estimated effect is usually within the bounds
# and that the true effect is also usually within the bounds
def test_stratify(self):
"""Test that we can properly stratify by treatment"""
T = [1, 0, 1, 2, 0, 2]
Y = [1, 2, 3, 4, 5, 6]
X = np.array([1, 1, 2, 2, 1, 2]).reshape(-1, 1)
est = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(), discrete_treatment=True)
inference = BootstrapInference(n_bootstrap_samples=5)
est.fit(Y, T, inference=inference)
est.const_marginal_effect_interval()
est.fit(Y, T, X=X, inference=inference)
est.const_marginal_effect_interval(X)
est.fit(Y, np.asarray(T).reshape(-1, 1), inference=inference) # test stratifying 2D treatment
est.const_marginal_effect_interval()
def test_stratify_orthoiv(self):
"""Test that we can properly stratify by treatment/instrument pair"""
T = [1, 0, 1, 1, 0, 0, 1, 0]
Z = [1, 0, 0, 1, 0, 1, 0, 1]
Y = [1, 2, 3, 4, 5, 6, 7, 8]
X = np.array([1, 1, 2, 2, 1, 2, 1, 2]).reshape(-1, 1)
est = LinearIntentToTreatDRIV(model_Y_X=LinearRegression(), model_T_XZ=LogisticRegression(),
flexible_model_effect=LinearRegression(), cv=2)
inference = BootstrapInference(n_bootstrap_samples=20)
est.fit(Y, T, Z=Z, X=X, inference=inference)
est.const_marginal_effect_interval(X)
def test_all_kinds(self):
T = [1, 0, 1, 2, 0, 2] * 5
Y = [1, 2, 3, 4, 5, 6] * 5
X = np.array([1, 1, 2, 2, 1, 2] * 5).reshape(-1, 1)
est = LinearDML(cv=2)
for kind in ['percentile', 'pivot', 'normal']:
with self.subTest(kind=kind):
inference = BootstrapInference(n_bootstrap_samples=5, bootstrap_type=kind)
est.fit(Y, T, inference=inference)
i = est.const_marginal_effect_interval()
inf = est.const_marginal_effect_inference()
assert i[0].shape == i[1].shape == inf.point_estimate.shape
assert np.allclose(i[0], inf.conf_int()[0])
assert np.allclose(i[1], inf.conf_int()[1])
est.fit(Y, T, X=X, inference=inference)
i = est.const_marginal_effect_interval(X)
inf = est.const_marginal_effect_inference(X)
assert i[0].shape == i[1].shape == inf.point_estimate.shape
assert np.allclose(i[0], inf.conf_int()[0])
assert np.allclose(i[1], inf.conf_int()[1])
i = est.coef__interval()
inf = est.coef__inference()
assert i[0].shape == i[1].shape == inf.point_estimate.shape
assert np.allclose(i[0], inf.conf_int()[0])
assert np.allclose(i[1], inf.conf_int()[1])
i = est.effect_interval(X)
inf = est.effect_inference(X)
assert i[0].shape == i[1].shape == inf.point_estimate.shape
assert np.allclose(i[0], inf.conf_int()[0])
assert np.allclose(i[1], inf.conf_int()[1])
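# Condensed sketch of the pattern exercised by the tests above (reusing this module's
# imports): BootstrapEstimator refits the wrapped estimator on resampled data, mirrors
# its attributes and methods, and adds *_interval counterparts for each of them.
x = np.random.normal(size=(500, 1))
y = (x * 0.5 + np.random.normal(size=(500, 1))).flatten()
bs = BootstrapEstimator(LinearRegression(), 20)
bs.fit(x, y)
print(bs.coef_)                     # mirrored attribute
lower, upper = bs.coef__interval()  # interval across the bootstrap replicates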
| 46.611446 | 112 | 0.587334 |
6036c4c88d2f0b7694d189829fd0d72400f900b6 | 4,933 | py | Python | venv/Lib/site-packages/flask_api/app.py | matuhn/FPT-Capstone | d67217b77ce723136bafd436e664a6d9fa4fb00b | ["MIT"] | 1 | 2021-05-07T13:07:47.000Z | 2021-05-07T13:07:47.000Z | venv/Lib/site-packages/flask_api/app.py | matuhn/FPT-Capstone | d67217b77ce723136bafd436e664a6d9fa4fb00b | ["MIT"] | 2 | 2021-05-07T12:58:34.000Z | 2021-06-12T22:17:07.000Z | venv/Lib/site-packages/flask_api/app.py | matuhn/FPT-Capstone | d67217b77ce723136bafd436e664a6d9fa4fb00b | ["MIT"] | 2 | 2021-04-22T08:20:28.000Z | 2022-01-11T01:13:29.000Z |
# coding: utf8
from __future__ import unicode_literals
from flask import request, Flask, Blueprint
from flask._compat import reraise, string_types, text_type
from flask_api.exceptions import APIException
from flask_api.request import APIRequest
from flask_api.response import APIResponse
from flask_api.settings import APISettings
from flask_api.status import HTTP_204_NO_CONTENT
from itertools import chain
from werkzeug.exceptions import HTTPException
import re
import sys
from flask_api.compat import is_flask_legacy
api_resources = Blueprint(
'flask-api', __name__,
url_prefix='/flask-api',
template_folder='templates', static_folder='static'
)
def urlize_quoted_links(content):
return re.sub(r'"(https?://[^"]*)"', r'"<a href="\1">\1</a>"', content)
class FlaskAPI(Flask):
request_class = APIRequest
response_class = APIResponse
def __init__(self, *args, **kwargs):
super(FlaskAPI, self).__init__(*args, **kwargs)
self.api_settings = APISettings(self.config)
self.register_blueprint(api_resources)
self.jinja_env.filters['urlize_quoted_links'] = urlize_quoted_links
def preprocess_request(self):
request.parser_classes = self.api_settings.DEFAULT_PARSERS
request.renderer_classes = self.api_settings.DEFAULT_RENDERERS
return super(FlaskAPI, self).preprocess_request()
def make_response(self, rv):
"""
We override this so that we can additionally handle
list and dict types by default.
"""
status_or_headers = headers = None
if isinstance(rv, tuple):
rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
if rv is None and status_or_headers == HTTP_204_NO_CONTENT:
rv = ''
if rv is None and status_or_headers:
raise ValueError('View function did not return a response')
if isinstance(status_or_headers, (dict, list)):
headers, status_or_headers = status_or_headers, None
if not isinstance(rv, self.response_class):
if isinstance(rv, (text_type, bytes, bytearray, list, dict)):
status = status_or_headers
rv = self.response_class(rv, headers=headers, status=status)
headers = status_or_headers = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status_or_headers is not None:
if isinstance(status_or_headers, string_types):
rv.status = status_or_headers
else:
rv.status_code = status_or_headers
if headers:
rv.headers.extend(headers)
return rv
def handle_user_exception(self, e):
"""
We override the default behavior in order to deal with APIException.
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
if isinstance(e, APIException):
return self.handle_api_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
if is_flask_legacy():
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
else:
for typecheck, handler in chain(dict(blueprint_handlers).items(),
dict(app_handlers).items()):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_api_exception(self, exc):
content = {'message': exc.detail}
status = exc.status_code
return self.response_class(content, status=status)
def create_url_adapter(self, request):
"""
We need to override the default behavior slightly here,
to ensure the any method-based routing takes account of
any method overloading, so that eg PUT requests from the
browsable API are routed to the correct view.
"""
if request is not None:
environ = request.environ.copy()
environ['REQUEST_METHOD'] = request.method
return self.url_map.bind_to_environ(environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
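# Usage sketch (not part of this module): what the make_response override above enables.
# View functions on a FlaskAPI app can return plain dicts/lists, optionally paired with a
# status code, and the result is rendered through the configured API renderers
# ('request' here is the flask proxy already imported at the top of this module).
app = FlaskAPI(__name__)
@app.route('/example/', methods=['GET', 'POST'])
def example():
    if request.method == 'POST':
        return {'received': request.data}, 201
    return {'hello': 'world'}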
| 37.090226 | 78 | 0.647071 |
2e4f416f372b8c428f380961959b7fcb4589c7ea | 9,615 | py | Python | resources/scripts/documentation/conf.py | DerekMRoberts93/PromptAdminWebsite | b2eb455f4825ff3efc498e0df693101f898e8fb0 | ["MIT"] | null | null | null | resources/scripts/documentation/conf.py | DerekMRoberts93/PromptAdminWebsite | b2eb455f4825ff3efc498e0df693101f898e8fb0 | ["MIT"] | null | null | null | resources/scripts/documentation/conf.py | DerekMRoberts93/PromptAdminWebsite | b2eb455f4825ff3efc498e0df693101f898e8fb0 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# MongoDB API documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 20 16:00:31 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MongoDB API'
copyright = u'2016, AJ'
author = u'AJ'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'MongoDB API v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'MongoDBAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MongoDBAPI.tex', u'MongoDB API Documentation',
u'AJ', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mongodbapi', u'MongoDB API Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MongoDBAPI', u'MongoDB API Documentation',
author, 'MongoDBAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
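# Illustration (not part of conf.py): with 'sphinx.ext.autodoc' and 'sphinx.ext.napoleon'
# enabled in the extensions list above, Google-style docstrings such as the following can
# be rendered into the built docs via an `.. autofunction::` directive in an .rst page.
def connect(uri, timeout=10):
    """Open a connection to the API.

    Args:
        uri (str): Server address.
        timeout (int): Seconds to wait before giving up.

    Returns:
        bool: True if the connection was established.
    """
    return True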
| 28.616071 | 80 | 0.702444 |
046042090287d045c8a937a80260878e3eb34bcd | 3,741 | py | Python | moksha/hub/amqp/qpid010.py | lmacken/moksha | b75325b8fd0fb3ea2d393ddb81c27f32ae7e0b96 | ["Apache-2.0"] | 1 | 2019-06-27T11:36:50.000Z | 2019-06-27T11:36:50.000Z | moksha/hub/amqp/qpid010.py | lmacken/moksha | b75325b8fd0fb3ea2d393ddb81c27f32ae7e0b96 | ["Apache-2.0"] | null | null | null | moksha/hub/amqp/qpid010.py | lmacken/moksha | b75325b8fd0fb3ea2d393ddb81c27f32ae7e0b96 | ["Apache-2.0"] | null | null | null |
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <lmacken@redhat.com>
import logging
from qpid.util import connect, URL, ssl
from qpid.datatypes import Message, uuid4, RangedSet
from qpid.connection import Connection
from qpid.session import SessionClosed
from moksha.hub.amqp.base import BaseAMQPHub
log = logging.getLogger('moksha.hub')
class QpidAMQPHub(BaseAMQPHub):
"""
Initialize the Moksha Hub.
`broker`
[amqps://][<user>[/<password>]@]<host>[:<port>]
"""
def __init__(self, broker, **kw):
self.set_broker(broker)
self.socket = connect(self.host, self.port)
if self.url.scheme == URL.AMQPS:
self.socket = ssl(self.socket)
self.connection = Connection(sock=self.socket,
username=self.user,
password=self.password)
self.connection.start()
log.info("Connected to AMQP Broker %s" % self.host)
self.session = self.connection.session(str(uuid4()))
def set_broker(self, broker):
self.url = URL(broker)
        self.user = self.url.user or 'guest'
self.password = self.url.password or 'guest'
self.host = self.url.host
if self.url.scheme == URL.AMQPS:
self.ssl = True
default_port = 5671
else:
self.ssl = False
default_port = 5672
self.port = self.url.port or default_port
def send_message(self, topic, message, exchange='amq.topic', **headers):
props = self.session.delivery_properties(**headers)
msg = Message(props, message)
self.session.message_transfer(destination=exchange, message=msg)
def subscribe_queue(self, server_queue_name, local_queue_name):
queue = self.session.incoming(local_queue_name)
self.session.message_subscribe(queue=server_queue_name,
destination=local_queue_name)
queue.start()
return queue
def queue_declare(self, queue, durable=True, exclusive=False,
auto_delete=False, **kw):
self.session.queue_declare(queue=queue, exclusive=exclusive,
auto_delete=auto_delete,
arguments={'qpid.max_count': 0,
'qpid.max_size': 0}, **kw)
def exchange_bind(self, queue, exchange='amq.topic', binding_key=None):
self.session.exchange_bind(exchange=exchange, queue=queue,
binding_key=binding_key)
def message_subscribe(self, queue, destination):
return self.session.message_subscribe(queue=queue,
destination=destination)
def message_accept(self, message):
try:
self.session.message_accept(RangedSet(message.id))
except SessionClosed:
log.debug("Accepted message on closed session: %s" % message.id)
pass
def close(self):
self.session.close(timeout=2)
self.connection.close(timeout=2)
self.socket.close()
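# Usage sketch (an assumption, not from the project) following the broker URL format
# documented in the class docstring above; host, credentials and topic are placeholders.
hub = QpidAMQPHub('amqps://guest/guest@localhost:5671')
hub.send_message('org.moksha.example', 'hello world')
hub.close()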
| 37.039604 | 76 | 0.62363 |
4baf4cb66c1767a160add23924fc109fe46abc20 | 27,548 | py | Python | python/ccxt/coinmate.py | evgenyfedorenko/ccxt | 97874a8562026087c836a269246ce27665113497 | ["MIT"] | null | null | null | python/ccxt/coinmate.py | evgenyfedorenko/ccxt | 97874a8562026087c836a269246ce27665113497 | ["MIT"] | null | null | null | python/ccxt/coinmate.py | evgenyfedorenko/ccxt | 97874a8562026087c836a269246ce27665113497 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
class coinmate(Exchange):
def describe(self):
return self.deep_extend(super(coinmate, self).describe(), {
'id': 'coinmate',
'name': 'CoinMate',
'countries': ['GB', 'CZ', 'EU'], # UK, Czech Republic
'rateLimit': 1000,
'has': {
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'fetchBalance': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTrades': True,
'fetchTransactions': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87460806-1c9f3f00-c616-11ea-8c46-a77018a8f3f4.jpg',
'api': 'https://coinmate.io/api',
'www': 'https://coinmate.io',
'fees': 'https://coinmate.io/fees',
'doc': [
'https://coinmate.docs.apiary.io',
'https://coinmate.io/developers',
],
'referral': 'https://coinmate.io?referral=YTFkM1RsOWFObVpmY1ZjMGREQmpTRnBsWjJJNVp3PT0',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'get': [
'orderBook',
'ticker',
'transactions',
'tradingPairs',
],
},
'private': {
'post': [
'balances',
'bitcoinCashWithdrawal',
'bitcoinCashDepositAddresses',
'bitcoinDepositAddresses',
'bitcoinWithdrawal',
'bitcoinWithdrawalFees',
'buyInstant',
'buyLimit',
'cancelOrder',
'cancelOrderWithInfo',
'createVoucher',
'dashDepositAddresses',
'dashWithdrawal',
'ethereumWithdrawal',
'ethereumDepositAddresses',
'litecoinWithdrawal',
'litecoinDepositAddresses',
'openOrders',
'order',
'orderHistory',
'orderById',
'pusherAuth',
'redeemVoucher',
'replaceByBuyLimit',
'replaceByBuyInstant',
'replaceBySellLimit',
'replaceBySellInstant',
'rippleDepositAddresses',
'rippleWithdrawal',
'sellInstant',
'sellLimit',
'transactionHistory',
'traderFees',
'tradeHistory',
'transfer',
'transferHistory',
'unconfirmedBitcoinDeposits',
'unconfirmedBitcoinCashDeposits',
'unconfirmedDashDeposits',
'unconfirmedEthereumDeposits',
'unconfirmedLitecoinDeposits',
'unconfirmedRippleDeposits',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.12 / 100,
'taker': 0.25 / 100,
'tiers': {
'taker': [
[0, 0.25 / 100],
[10000, 0.23 / 100],
[100000, 0.21 / 100],
[250000, 0.20 / 100],
[500000, 0.15 / 100],
[1000000, 0.13 / 100],
[3000000, 0.10 / 100],
[15000000, 0.05 / 100],
],
'maker': [
[0, 0.12 / 100],
[10000, 0.11 / 100],
                            [100000, 0.10 / 100],
[250000, 0.08 / 100],
[500000, 0.05 / 100],
[1000000, 0.03 / 100],
[3000000, 0.02 / 100],
[15000000, 0],
],
},
},
'promotional': {
'trading': {
'maker': 0.05 / 100,
'taker': 0.15 / 100,
'tiers': {
'taker': [
[0, 0.15 / 100],
[10000, 0.14 / 100],
[100000, 0.13 / 100],
[250000, 0.12 / 100],
[500000, 0.11 / 100],
[1000000, 0.1 / 100],
[3000000, 0.08 / 100],
[15000000, 0.05 / 100],
],
'maker': [
[0, 0.05 / 100],
[10000, 0.04 / 100],
                                [100000, 0.03 / 100],
[250000, 0.02 / 100],
[500000, 0],
[1000000, 0],
[3000000, 0],
[15000000, 0],
],
},
},
},
},
'options': {
'promotionalMarkets': ['ETH/EUR', 'ETH/CZK', 'ETH/BTC', 'XRP/EUR', 'XRP/CZK', 'XRP/BTC', 'DASH/EUR', 'DASH/CZK', 'DASH/BTC', 'BCH/EUR', 'BCH/CZK', 'BCH/BTC'],
},
'exceptions': {
'exact': {
'No order with given ID': OrderNotFound,
},
'broad': {
'Not enough account balance available': InsufficientFunds,
'Incorrect order ID': InvalidOrder,
'Minimum Order Size ': InvalidOrder,
'TOO MANY REQUESTS': RateLimitExceeded,
},
},
})
def fetch_markets(self, params={}):
response = self.publicGetTradingPairs(params)
#
# {
# "error":false,
# "errorMessage":null,
# "data": [
# {
# "name":"BTC_EUR",
# "firstCurrency":"BTC",
# "secondCurrency":"EUR",
# "priceDecimals":2,
# "lotDecimals":8,
# "minAmount":0.0002,
# "tradesWebSocketChannelId":"trades-BTC_EUR",
# "orderBookWebSocketChannelId":"order_book-BTC_EUR",
# "tradeStatisticsWebSocketChannelId":"statistics-BTC_EUR"
# },
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'name')
baseId = self.safe_string(market, 'firstCurrency')
quoteId = self.safe_string(market, 'secondCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
promotionalMarkets = self.safe_value(self.options, 'promotionalMarkets', [])
fees = self.safe_value(self.fees, 'trading')
if self.in_array(symbol, promotionalMarkets):
promotionalFees = self.safe_value(self.fees, 'promotional', {})
fees = self.safe_value(promotionalFees, 'trading', fees)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': None,
'maker': fees['maker'],
'taker': fees['taker'],
'info': market,
'precision': {
'price': self.safe_integer(market, 'priceDecimals'),
'amount': self.safe_integer(market, 'lotDecimals'),
},
'limits': {
'amount': {
'min': self.safe_float(market, 'minAmount'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostBalances(params)
balances = self.safe_value(response, 'data')
result = {'info': response}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
balance = self.safe_value(balances, currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available')
account['used'] = self.safe_float(balance, 'reserved')
account['total'] = self.safe_float(balance, 'balance')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'groupByPriceLimit': 'False',
}
response = self.publicGetOrderBook(self.extend(request, params))
orderbook = response['data']
timestamp = self.safe_timestamp(orderbook, 'timestamp')
return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'amount')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
}
response = self.publicGetTicker(self.extend(request, params))
ticker = self.safe_value(response, 'data')
timestamp = self.safe_timestamp(ticker, 'timestamp')
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'vwap': None,
'askVolume': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'amount'),
'quoteVolume': None,
'info': ticker,
}
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'limit': 1000,
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['timestampFrom'] = since
if code is not None:
request['currency'] = self.currency_id(code)
response = self.privatePostTransferHistory(self.extend(request, params))
items = response['data']
return self.parse_transactions(items, None, since, limit)
def parse_transaction_status(self, status):
statuses = {
# any other types ?
'COMPLETED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, item, currency=None):
#
# deposits
#
# {
# transactionId: 1862815,
# timestamp: 1516803982388,
# amountCurrency: 'LTC',
# amount: 1,
# fee: 0,
# walletType: 'LTC',
# transferType: 'DEPOSIT',
# transferStatus: 'COMPLETED',
# txid:
# 'ccb9255dfa874e6c28f1a64179769164025329d65e5201849c2400abd6bce245',
# destination: 'LQrtSKA6LnhcwRrEuiborQJnjFF56xqsFn',
# destinationTag: null
# }
#
# withdrawals
#
# {
# transactionId: 2140966,
# timestamp: 1519314282976,
# amountCurrency: 'EUR',
# amount: 8421.7228,
# fee: 16.8772,
# walletType: 'BANK_WIRE',
# transferType: 'WITHDRAWAL',
# transferStatus: 'COMPLETED',
# txid: null,
# destination: null,
# destinationTag: null
# }
#
timestamp = self.safe_integer(item, 'timestamp')
amount = self.safe_float(item, 'amount')
fee = self.safe_float(item, 'fee')
txid = self.safe_string(item, 'txid')
address = self.safe_string(item, 'destination')
tag = self.safe_string(item, 'destinationTag')
currencyId = self.safe_string(item, 'amountCurrency')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(item, 'transferType')
status = self.parse_transaction_status(self.safe_string(item, 'transferStatus'))
id = self.safe_string(item, 'transactionId')
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'type': type,
'txid': txid,
'address': address,
'tag': tag,
'status': status,
'fee': {
'cost': fee,
'currency': code,
},
'info': item,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if limit is None:
limit = 1000
request = {
'limit': limit,
}
if symbol is not None:
market = self.market(symbol)
request['currencyPair'] = market['id']
if since is not None:
request['timestampFrom'] = since
response = self.privatePostTradeHistory(self.extend(request, params))
items = response['data']
return self.parse_trades(items, None, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchMyTrades(private)
#
# {
# transactionId: 2671819,
# createdTimestamp: 1529649127605,
# currencyPair: 'LTC_BTC',
# type: 'BUY',
# orderType: 'LIMIT',
# orderId: 101810227,
# amount: 0.01,
# price: 0.01406,
# fee: 0,
# feeType: 'MAKER'
# }
#
# fetchTrades(public)
#
# {
# "timestamp":1561598833416,
# "transactionId":"4156303",
# "price":10950.41,
# "amount":0.004,
# "currencyPair":"BTC_EUR",
# "tradeType":"BUY"
# }
#
marketId = self.safe_string(trade, 'currencyPair')
market = self.safe_market(marketId, market, '_')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if amount is not None:
if price is not None:
cost = price * amount
side = self.safe_string_lower_2(trade, 'type', 'tradeType')
type = self.safe_string_lower(trade, 'orderType')
orderId = self.safe_string(trade, 'orderId')
id = self.safe_string(trade, 'transactionId')
timestamp = self.safe_integer_2(trade, 'timestamp', 'createdTimestamp')
fee = None
feeCost = self.safe_float(trade, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': market['quote'],
}
takerOrMaker = self.safe_string(trade, 'feeType')
takerOrMaker = 'maker' if (takerOrMaker == 'MAKER') else 'taker'
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': type,
'side': side,
'order': orderId,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
'minutesIntoHistory': 10,
}
response = self.publicGetTransactions(self.extend(request, params))
#
# {
# "error":false,
# "errorMessage":null,
# "data":[
# {
# "timestamp":1561598833416,
# "transactionId":"4156303",
# "price":10950.41,
# "amount":0.004,
# "currencyPair":"BTC_EUR",
# "tradeType":"BUY"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.privatePostOpenOrders(self.extend({}, params))
extension = {'status': 'open'}
return self.parse_orders(response['data'], None, since, limit, extension)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'currencyPair': market['id'],
}
# offset param that appears in other parts of the API doesn't appear to be supported here
if limit is not None:
request['limit'] = limit
response = self.privatePostOrderHistory(self.extend(request, params))
return self.parse_orders(response['data'], market, since, limit)
def parse_order_status(self, status):
statuses = {
'FILLED': 'closed',
'CANCELLED': 'canceled',
'PARTIALLY_FILLED': 'open',
'OPEN': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order_type(self, type):
types = {
'LIMIT': 'limit',
'MARKET': 'market',
}
return self.safe_string(types, type, type)
def parse_order(self, order, market=None):
#
# limit sell
#
# {
# id: 781246605,
# timestamp: 1584480015133,
# trailingUpdatedTimestamp: null,
# type: 'SELL',
# currencyPair: 'ETH_BTC',
# price: 0.0345,
# amount: 0.01,
# stopPrice: null,
# originalStopPrice: null,
# marketPriceAtLastUpdate: null,
# marketPriceAtOrderCreation: null,
# orderTradeType: 'LIMIT',
# hidden: False,
# trailing: False,
# clientOrderId: null
# }
#
# limit buy
#
# {
# id: 67527001,
# timestamp: 1517931722613,
# trailingUpdatedTimestamp: null,
# type: 'BUY',
# price: 5897.24,
# remainingAmount: 0.002367,
# originalAmount: 0.1,
# stopPrice: null,
# originalStopPrice: null,
# marketPriceAtLastUpdate: null,
# marketPriceAtOrderCreation: null,
# status: 'CANCELLED',
# orderTradeType: 'LIMIT',
# hidden: False,
# avgPrice: null,
# trailing: False,
# }
#
id = self.safe_string(order, 'id')
timestamp = self.safe_integer(order, 'timestamp')
side = self.safe_string_lower(order, 'type')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'originalAmount')
remaining = self.safe_float(order, 'remainingAmount')
if remaining is None:
remaining = self.safe_float(order, 'amount')
status = self.parse_order_status(self.safe_string(order, 'status'))
type = self.parse_order_type(self.safe_string(order, 'orderTradeType'))
filled = None
cost = None
if (amount is not None) and (remaining is not None):
filled = max(amount - remaining, 0)
if remaining == 0:
status = 'closed'
if price is not None:
cost = filled * price
average = self.safe_float(order, 'avgPrice')
marketId = self.safe_string(order, 'currencyPair')
symbol = self.safe_symbol(marketId, market, '_')
clientOrderId = self.safe_string(order, 'clientOrderId')
return {
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'trades': None,
'info': order,
'fee': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
method = 'privatePost' + self.capitalize(side)
request = {
'currencyPair': self.market_id(symbol),
}
if type == 'market':
if side == 'buy':
request['total'] = self.amount_to_precision(symbol, amount) # amount in fiat
else:
                request['amount'] = self.amount_to_precision(symbol, amount)  # amount in crypto
method += 'Instant'
else:
request['amount'] = self.amount_to_precision(symbol, amount) # amount in crypto
request['price'] = self.price_to_precision(symbol, price)
method += self.capitalize(type)
response = getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
market = None
if symbol:
market = self.market(symbol)
response = self.privatePostOrderById(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def cancel_order(self, id, symbol=None, params={}):
# {"error":false,"errorMessage":null,"data":{"success":true,"remainingAmount":0.01}}
request = {'orderId': id}
response = self.privatePostCancelOrderWithInfo(self.extend(request, params))
return {
'info': response,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce + self.uid + self.apiKey
signature = self.hmac(self.encode(auth), self.encode(self.secret))
body = self.urlencode(self.extend({
'clientId': self.uid,
'nonce': nonce,
'publicKey': self.apiKey,
'signature': signature.upper(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is not None:
if 'error' in response:
# {"error":true,"errorMessage":"Minimum Order Size 0.01 ETH","data":null}
if response['error']:
message = self.safe_string(response, 'errorMessage')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(self.id + ' ' + self.json(response))
if code > 400:
if body:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], body, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
raise ExchangeError(self.id + ' ' + body)
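# --- Editor's illustrative sketch (not part of the original exchange file) ---
# The sign() method above authenticates private endpoints by HMAC-signing the string
# nonce + uid + apiKey with the API secret and sending the upper-cased hex digest as
# the 'signature' field. The standalone helper below mirrors that scheme with the
# standard library; SHA-256 is assumed from ccxt's default hmac() implementation, the
# helper name is made up, and every value shown is a placeholder.
import hashlib
import hmac as _hmac

def _example_request_signature(nonce: str, client_id: str, public_key: str, secret: str) -> str:
    auth = (nonce + client_id + public_key).encode()
    return _hmac.new(secret.encode(), auth, hashlib.sha256).hexdigest().upper()

# e.g. _example_request_signature("1600000000000", "1234", "public-api-key", "secret-key")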
| 38.691011 | 174 | 0.46301 |
fbc6fae520f1b570bd139df8fdbf83665d30280d | 461 | py | Python | Main_FusionLeafs.py | ransalmo/data-augmentation-scripts | d4f298c31b60cbc65ca2bee0f352497cd4cb0091 | ["MIT"] | null | null | null | Main_FusionLeafs.py | ransalmo/data-augmentation-scripts | d4f298c31b60cbc65ca2bee0f352497cd4cb0091 | ["MIT"] | null | null | null | Main_FusionLeafs.py | ransalmo/data-augmentation-scripts | d4f298c31b60cbc65ca2bee0f352497cd4cb0091 | ["MIT"] | null | null | null |
import fusion_leaf.fusion_leaf_stains
leaf_source_folder = "/Users/randysalas/Desktop/data/healthy"
stains_source_folder = "/Users/randysalas/Desktop/data/strains"
destiny_folder = "/Users/randysalas/Desktop/data/generated"
to_generate = 2500
fusion_leaf.fusion_leaf_stains.generate_synthetic_images(leaf_source_folder,
stains_source_folder, destiny_folder, to_generate=to_generate)
print("Done")
| 28.8125 | 119 | 0.737527 |
5484b1d8092fd3604039bb46b2b6851892e1648f | 6,521 | py | Python | tensorflow/tools/test/gpu_info_lib.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null | tensorflow/tools/test/gpu_info_lib.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null | tensorflow/tools/test/gpu_info_lib.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import platform
from tensorflow.core.util import test_log_pb2
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
def _gather_gpu_devices_proc():
"""Try to gather NVidia GPU device information via /proc/driver."""
dev_info = []
for f in gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
bus_id = f.split("/")[5]
key_values = dict(line.rstrip().replace("\t", "").split(":", 1)
for line in gfile.GFile(f, "r"))
key_values = dict((k.lower(), v.strip(" ").rstrip(" "))
for (k, v) in key_values.items())
info = test_log_pb2.GPUInfo()
info.model = key_values.get("model", "Unknown")
info.uuid = key_values.get("gpu uuid", "Unknown")
info.bus_id = bus_id
dev_info.append(info)
return dev_info
class CUDADeviceProperties(ct.Structure):
# See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
# the cudaDeviceProp struct.
_fields_ = [
("name", ct.c_char * 256),
("totalGlobalMem", ct.c_size_t),
("sharedMemPerBlock", ct.c_size_t),
("regsPerBlock", ct.c_int),
("warpSize", ct.c_int),
("memPitch", ct.c_size_t),
("maxThreadsPerBlock", ct.c_int),
("maxThreadsDim", ct.c_int * 3),
("maxGridSize", ct.c_int * 3),
("clockRate", ct.c_int),
("totalConstMem", ct.c_size_t),
("major", ct.c_int),
("minor", ct.c_int),
("textureAlignment", ct.c_size_t),
("texturePitchAlignment", ct.c_size_t),
("deviceOverlap", ct.c_int),
("multiProcessorCount", ct.c_int),
("kernelExecTimeoutEnabled", ct.c_int),
("integrated", ct.c_int),
("canMapHostMemory", ct.c_int),
("computeMode", ct.c_int),
("maxTexture1D", ct.c_int),
("maxTexture1DMipmap", ct.c_int),
("maxTexture1DLinear", ct.c_int),
("maxTexture2D", ct.c_int * 2),
("maxTexture2DMipmap", ct.c_int * 2),
("maxTexture2DLinear", ct.c_int * 3),
("maxTexture2DGather", ct.c_int * 2),
("maxTexture3D", ct.c_int * 3),
("maxTexture3DAlt", ct.c_int * 3),
("maxTextureCubemap", ct.c_int),
("maxTexture1DLayered", ct.c_int * 2),
("maxTexture2DLayered", ct.c_int * 3),
("maxTextureCubemapLayered", ct.c_int * 2),
("maxSurface1D", ct.c_int),
("maxSurface2D", ct.c_int * 2),
("maxSurface3D", ct.c_int * 3),
("maxSurface1DLayered", ct.c_int * 2),
("maxSurface2DLayered", ct.c_int * 3),
("maxSurfaceCubemap", ct.c_int),
("maxSurfaceCubemapLayered", ct.c_int * 2),
("surfaceAlignment", ct.c_size_t),
("concurrentKernels", ct.c_int),
("ECCEnabled", ct.c_int),
("pciBusID", ct.c_int),
("pciDeviceID", ct.c_int),
("pciDomainID", ct.c_int),
("tccDriver", ct.c_int),
("asyncEngineCount", ct.c_int),
("unifiedAddressing", ct.c_int),
("memoryClockRate", ct.c_int),
("memoryBusWidth", ct.c_int),
("l2CacheSize", ct.c_int),
("maxThreadsPerMultiProcessor", ct.c_int),
("streamPrioritiesSupported", ct.c_int),
("globalL1CacheSupported", ct.c_int),
("localL1CacheSupported", ct.c_int),
("sharedMemPerMultiprocessor", ct.c_size_t),
("regsPerMultiprocessor", ct.c_int),
("managedMemSupported", ct.c_int),
("isMultiGpuBoard", ct.c_int),
("multiGpuBoardGroupID", ct.c_int),
# Pad with extra space to avoid dereference crashes if future
# versions of CUDA extend the size of this struct.
("__future_buffer", ct.c_char * 4096)
]
def _gather_gpu_devices_cudart():
"""Try to gather NVidia GPU device information via libcudart."""
dev_info = []
system = platform.system()
if system == "Linux":
libcudart = ct.cdll.LoadLibrary("libcudart.so")
elif system == "Darwin":
libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
elif system == "Windows":
libcudart = ct.windll.LoadLibrary("libcudart.dll")
else:
raise NotImplementedError("Cannot identify system.")
version = ct.c_int()
rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
if rc != 0:
raise ValueError("Could not get version")
if version.value < 6050:
    raise NotImplementedError("CUDA version must be >= 6.5")
device_count = ct.c_int()
libcudart.cudaGetDeviceCount(ct.byref(device_count))
for i in range(device_count.value):
properties = CUDADeviceProperties()
rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
if rc != 0:
raise ValueError("Could not get device properties")
pci_bus_id = " " * 13
rc = libcudart.cudaDeviceGetPCIBusId(ct.c_char_p(pci_bus_id), 13, i)
if rc != 0:
raise ValueError("Could not get device PCI bus id")
info = test_log_pb2.GPUInfo() # No UUID available
info.model = properties.name
info.bus_id = pci_bus_id
dev_info.append(info)
del properties
return dev_info
def gather_gpu_devices():
"""Gather gpu device info.
Returns:
A list of test_log_pb2.GPUInfo messages.
"""
try:
# Prefer using /proc if possible, it provides the UUID.
dev_info = _gather_gpu_devices_proc()
if not dev_info:
raise ValueError("No devices found")
return dev_info
except (IOError, ValueError, errors.OpError):
pass
try:
# Fall back on using libcudart
return _gather_gpu_devices_cudart()
except (OSError, ValueError, NotImplementedError, errors.OpError):
return []
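# --- Editor's illustrative usage sketch (not part of the original module) ---
# gather_gpu_devices() returns test_log_pb2.GPUInfo protos; a caller could log the
# detected devices like this (guarded so importing the module stays side-effect free).
if __name__ == "__main__":
  for gpu in gather_gpu_devices():
    print("model=%s uuid=%s bus_id=%s" % (gpu.model, gpu.uuid, gpu.bus_id))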
| 35.82967 | 81 | 0.636405 |
fea6ec906ee8fe17a40dad9a77ad927c093d9a4b | 328 | py | Python | SmtApi/session/session.py | Inaruslynx/SmtApi | 09ecc20cb0e40f2693c67740191a570952e86b39 | ["MIT"] | null | null | null | SmtApi/session/session.py | Inaruslynx/SmtApi | 09ecc20cb0e40f2693c67740191a570952e86b39 | ["MIT"] | 2 | 2021-01-15T17:23:45.000Z | 2021-01-21T16:46:14.000Z | SmtApi/session/session.py | Inaruslynx/SmtApi | 09ecc20cb0e40f2693c67740191a570952e86b39 | ["MIT"] | null | null | null |
from typing import Tuple
import requests
class SmtApiSession(requests.Session):
def __init__(self, *args, **kwargs):
super(SmtApiSession, self).__init__(*args, **kwargs)
def init_basic_auth(self, username:str, password:str, cert:Tuple):
self.auth = (username, password)
self.cert = cert
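# --- Editor's illustrative usage sketch (not part of the original module) ---
# The credentials, certificate paths and URL below are placeholders, not project values.
if __name__ == "__main__":
    session = SmtApiSession()
    session.init_basic_auth("username", "password", cert=("client.crt", "client.key"))
    # The session now behaves like any requests.Session with basic auth and a client
    # certificate attached, e.g.:
    # response = session.get("https://example.invalid/api/resource")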
| 23.428571 | 70 | 0.676829 |
5b692607b89ac3e2e516a39298d95567250c2ee6 | 11,160 | py | Python | tests/command tests/test_post_one_page.py | VTimofeenko/confluence_poster | 4c09d487662540d5e6b3e8cde754dc766c12d4f3 | ["MIT"] | 2 | 2021-06-01T05:30:11.000Z | 2022-03-12T18:40:39.000Z | tests/command tests/test_post_one_page.py | VTimofeenko/confluence_poster | 4c09d487662540d5e6b3e8cde754dc766c12d4f3 | ["MIT"] | 36 | 2020-12-22T03:36:33.000Z | 2021-09-09T16:06:36.000Z | tests/command tests/test_post_one_page.py | SabbathHex/confluence_poster | 4c09d487662540d5e6b3e8cde754dc766c12d4f3 | ["MIT"] | null | null | null |
from typer.testing import CliRunner
import pytest
from confluence_poster.main import app
from utils import (
clone_local_config,
generate_run_cmd,
real_confluence_config,
real_config,
confluence_instance,
mk_fake_file,
page_created,
fake_title_generator,
get_page_body,
get_page_title,
get_pages_ids_from_stdout,
get_page_id_from_stdout,
run_with_config,
join_input,
create_single_page_input,
)
from functools import partial
f"""This module requires an instance of confluence running.
The tests will be done against it using a {real_confluence_config}
This is a collection of tests on a single page"""
pytestmark = pytest.mark.online
runner = CliRunner()
mk_tmp_file = clone_local_config()
default_run_cmd = generate_run_cmd(runner=runner, app=app, default_args=["post-page"])
run_with_config = partial(run_with_config, default_run_cmd=default_run_cmd)
def run_with_title(
page_title: str = None,
fake_title=True,
*args,
**kwargs,
):
"""Helper function to create pages with specific title. Generates fake title by default"""
if page_title is None and fake_title:
page_title = next(fake_title_generator)
elif fake_title is False and page_title is None:
raise ValueError("Fake title is False and no real title was provided")
return (
run_with_config(pre_args=["--page-title", page_title], *args, **kwargs),
page_title,
)
def test_page_overridden_title(make_one_page_config):
"""Tests that the title supplied through command line is applied"""
config_file, config = make_one_page_config
result, page_title = run_with_title(
input=create_single_page_input, # create page, do not look for parent, create in root
config_file=config_file,
)
assert result.exit_code == 0
created_page_title = get_page_title(get_page_id_from_stdout(result.stdout))
assert (
created_page_title == page_title
), "Page title was not applied from command line"
assert created_page_title != config.pages[0].page_title, (
"Page title is the same as in config," " should have been overwritten"
)
def test_post_single_page_no_parent(make_one_page_config):
"""Test with good default config, to check that everything is OK. Creates a sample page in the root of the space
Author's note: mirrors setup_page fixture, but kept separately to make failures clearer"""
config_file, config = make_one_page_config
result = run_with_config(
config_file=config_file,
input=create_single_page_input,
)
assert result.exit_code == 0
assert "Looking for page" in result.stdout
assert "Should the page be created?" in result.stdout # checking the prompt
assert (
"Should the script look for a parent in space" in result.stdout
) # checking the prompt
assert "Create the page in the root" in result.stdout # checking the prompt
assert f"Could not find page '{config.pages[0].page_title}'" in result.stdout
assert "Creating page" in result.stdout
assert "Created page" in result.stdout
assert "Finished processing pages" in result.stdout
def test_not_create_if_refused(make_one_page_config):
config_file, config = make_one_page_config
result = run_with_config(
input=join_input("N"),
config_file=config_file,
)
assert result.exit_code == 0
assert (
"Not creating page" in result.stdout
), "Script did not report that page is not created"
assert not page_created(
page_title=config.pages[0].page_title
), "Page was not supposed to be created"
assert (
len(get_pages_ids_from_stdout(result.stdout)) == 0
), "Detected a page that was created!"
@pytest.mark.parametrize(
"parent_page_title_source",
["dialog", "cmdline", "config"],
ids=lambda source: f"Post page with parent title provided from {source}",
)
def test_post_single_page_with_parent(setup_page, parent_page_title_source, tmp_path):
"""Tests that the parent_page_title is applied to create the page in the proper place.
Tests scenarios of providing the parent title in dialog (through user input), as --parent-page-title argument,
or in config"""
# Create the first page, it will be the parent
config, (parent_id, parent_page_title) = setup_page(1)
page_title = next(fake_title_generator)
if parent_page_title_source == "dialog":
result, _ = run_with_title(
input=join_input("Y", "Y", parent_page_title, "Y"),
config_file=real_confluence_config,
)
assert "Which page should the script look for?" in result.stdout
assert "URL is:" in result.stdout
assert "Proceed to create the page" in result.stdout
else:
if parent_page_title_source == "cmdline":
result = run_with_config(
input=f"Y\n", # create page
pre_args=[
"--parent-page-title",
parent_page_title,
"--page-title",
page_title,
],
config_file=real_confluence_config,
)
else:
config_file = mk_tmp_file(
tmp_path=tmp_path,
key_to_update="pages.page1.page_parent_title",
value_to_update=parent_page_title,
)
result, _ = run_with_title(
input=f"Y\n", config_file=config_file # create page
)
assert (
"Which page should the script look for?" not in result.stdout
), "If the parent page title is explicitly supplied, script should not look for parent"
assert "Found page #" in result.stdout
assert result.exit_code == 0
assert get_page_id_from_stdout(
result.stdout
) in confluence_instance.get_child_id_list(parent_id)
def test_render_ok(tmp_path, setup_page):
"""Test that is supposed ot check that the page rendered confluencewiki format successfully"""
config_file, (page_id, page_title) = setup_page(1)
config = mk_tmp_file(
tmp_path,
config_to_clone=real_confluence_config,
key_to_update="pages.page1.page_file",
value_to_update="page2.confluencewiki",
)
run_with_title(page_title, config_file=config)
assert get_page_body(page_id) == "<h1>Header</h1>\n\n<p>Some text</p>"
def test_skip_in_space_root():
"""Tests that page is properly skipped if the user aborted the creation on the space root prompt"""
result, page_title = run_with_title(
input="Y\n" # do create page
"N\n" # do not look for parent
"N\n", # do not create in root
config_file=real_confluence_config,
)
assert "Looking for page" in result.stdout
assert "Should the page be created?" in result.stdout # checking the prompt
assert (
"Should the script look for a parent in space" in result.stdout
) # checking the prompt
assert "Create the page in the root" in result.stdout # checking the prompt
assert "will skip the page" in result.stdout # checking the prompt
assert result.exit_code == 0
assert (
confluence_instance.get_page_by_title(
space=real_config.pages[0].page_space, title=page_title
)
is None
), "Page should not had been created"
assert (
len(get_pages_ids_from_stdout(result.stdout)) == 0
), "Found a page number when it should not be found"
@pytest.mark.parametrize(
"action",
["create", "update"],
ids=lambda action: f"User {action}s the page with --minor-edit flag",
)
def test_minor_edit(action, make_one_page_config, tmp_path, setup_page):
"""Tests that minor edit action is recorded on the page. API does not allow creation of the
page to be a minor edit, only an update"""
if action == "create":
config_file, config = make_one_page_config
result = run_with_config(
config_file=config_file, pre_args=["--minor-edit"], input="Y\nN\nY\n"
)
page_id = get_page_id_from_stdout(result.stdout)
else:
overwrite_file, new_text, overwrite_config = mk_fake_file(
tmp_path, filename="overwrite"
)
config_file, (page_id, page_title) = setup_page(1)
result = run_with_config(
config_file=overwrite_config,
pre_args=["--page-title", page_title, "--minor-edit"],
)
assert result.exit_code == 0
last_update = confluence_instance.get_content_history(page_id).get("lastUpdated")
if action == "create":
assert not last_update.get("minorEdit"), "Creation was marked as minor edit"
else:
# Looks like Atlassian stopped exposing this in the API :( no notifications are sent out though
with pytest.raises(AssertionError):
assert last_update.get(
"minorEdit"
), "Page update was not marked as minor edit"
def test_create_page_under_nonexistent_parent(tmp_path, make_one_page_config):
"""Tries to create a page under a non-existent parent, ensures that it fails and reports"""
config_file, config = make_one_page_config
parent_page_title = next(fake_title_generator)
config_file = mk_tmp_file(
tmp_path=tmp_path,
config_to_clone=config_file,
key_to_update="pages.page1.page_parent_title",
value_to_update=parent_page_title,
)
result = run_with_config(input=f"Y\n", config_file=config_file) # create page
assert result.exit_code == 0
assert f"page '{parent_page_title}' not found" in result.stdout
assert "Skipping page" in result.stdout
def test_search_for_parent_multiple_times(make_one_page_config):
"""Checks that the user can retry searching for a parent if it is not found"""
config_file, config = make_one_page_config
attempts = 3 # how many times to try to look for parent
nl = "\n" # to work around f-string '\' limitation
result = run_with_config(
input="Y\n" # create page
f"{attempts * ('Y' + nl + next(fake_title_generator) + nl)}" # try to look
"N\n" # finally, refuse to look for parent
"Y\n", # create in root
config_file=config_file,
)
assert result.exit_code == 0
assert (
result.stdout.count("Should the script look for a parent in space")
== attempts + 1
) # +1 because refusal
def test_refuse_to_create_with_parent(setup_page):
"""Tests user's refusal to create the page when prompted for a parent page"""
config_file, (parent_id, parent_page_title) = setup_page(1)
result, page_title = run_with_title(
input=f"Y\n" # create page
f"Y\n" # look for parent
f"{parent_page_title}\n" # title of the parent
f"N\n", # no, do not create
config_file=real_confluence_config,
)
assert "Which page should the script look for?" in result.stdout
assert "URL is:" in result.stdout
assert "Should the page be created?" in result.stdout # checking the prompt
assert not page_created(page_title)
| 37.830508 | 116 | 0.678315 |
2b27d8f5dd96a399689bc536a4267de5b6e3fe08 | 672 | py | Python | MatchMe-New/old/server/Accounts/urls.py | joonyoungleeduke/MatchMe | 8ff6aeff06e5b8d6a83c531e9992a7b14f6fc074 | ["MIT"] | null | null | null | MatchMe-New/old/server/Accounts/urls.py | joonyoungleeduke/MatchMe | 8ff6aeff06e5b8d6a83c531e9992a7b14f6fc074 | ["MIT"] | null | null | null | MatchMe-New/old/server/Accounts/urls.py | joonyoungleeduke/MatchMe | 8ff6aeff06e5b8d6a83c531e9992a7b14f6fc074 | ["MIT"] | null | null | null |
from django.urls import path, include
from Accounts.views import RegisterViewSet, LogoutView, UserViewSet
from rest_framework_simplejwt import views as jwt_views
user_create = RegisterViewSet.as_view({
'post': 'create',
})
user_id = UserViewSet.as_view({
'get': 'get_user_id',
})
urlpatterns = [
path('user/id/', user_id, name='user-id'),
path('token/obtain/', jwt_views.TokenObtainPairView.as_view(), name='token_create'), # login
path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'), # periodic refresh
path('register/', user_create, name='register'),
path('logout/', LogoutView.as_view(), name='logout'),
]
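# --- Editor's illustrative note (not part of the original urls.py) ---
# With this conf included in the project's root URLs, clients POST credentials to the
# JWT endpoints and the named routes can be reversed as usual; the resolved prefix
# depends on how the conf is mounted, so the paths below are only indicative.
# from django.urls import reverse
# reverse('token_create')   # -> ".../token/obtain/"
# reverse('register')       # -> ".../register/"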
| 37.333333 | 106 | 0.71875 |
9c9c23302e36abc4f6f1413ad08d3fb87aef5d47 | 1,096 | py | Python | step3_benchmark_analysis/get_tuning_curves.py | njw0709/ShapeY | f2272f799fe779c3e4b3d0d06e88ecde9e4b039c | ["MIT"] | 1 | 2022-03-22T17:19:57.000Z | 2022-03-22T17:19:57.000Z | step3_benchmark_analysis/get_tuning_curves.py | njw0709/ShapeY | f2272f799fe779c3e4b3d0d06e88ecde9e4b039c | ["MIT"] | null | null | null | step3_benchmark_analysis/get_tuning_curves.py | njw0709/ShapeY | f2272f799fe779c3e4b3d0d06e88ecde9e4b039c | ["MIT"] | null | null | null |
from shapey.dataprocess.raw_data import AllImgPairCorrelationData, PostProcessedAllImgPairCorrelationData
from shapey.utils.configs import ShapeYConfig
from hydra import compose, initialize
import logging
log = logging.getLogger(__name__)
def get_tuning_curve(args: ShapeYConfig) -> None:
input_name = args.pipeline.step3_output
log.info('computing tuning curves...')
try:
if not args.data.cr:
resnet_output_allimgpairs = AllImgPairCorrelationData(input_name)
resnet_output_allimgpairs.compute_tuning_curves()
else:
resnet_output_allimgpairs = PostProcessedAllImgPairCorrelationData(input_name)
resnet_output_allimgpairs.compute_tuning_curves()
except Exception as e:
log.error(e)
finally:
log.info('done!')
resnet_output_allimgpairs.hdfstore.close()
if __name__ == '__main__':
with initialize(config_path="../conf", job_name="step3_analysis_tuning_curve"):
cfg = compose(config_name="config", overrides=["data.project_dir=/home/namj/ShapeY"])
get_tuning_curve(cfg)
| 42.153846 | 105 | 0.736314 |
4d1b7661b9b8b26a6da850d1997b03b6e695c163 | 19,446 | py | Python | meeus.py | mcoatanhay/meeuscalc | 707f7332944220330d4fc7608e7cba928a0b60db | ["MIT"] | null | null | null | meeus.py | mcoatanhay/meeuscalc | 707f7332944220330d4fc7608e7cba928a0b60db | ["MIT"] | null | null | null | meeus.py | mcoatanhay/meeuscalc | 707f7332944220330d4fc7608e7cba928a0b60db | ["MIT"] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Fichier: meeus.py
# Auteur: Marc COATANHAY
"""
Implémentation des "calculs astronomiques à l'usage des amateurs"
de Jean MEEUS, édition 2014.
"""
# Import des modules
try:
import mes_modules_path
except:
pass
import coordonnees.coord as coord
import etoilescat.etoiles as etoiles
import incertitudes.incert as incert
# Définitions constantes et variables globales
jours_semaine = {
0: "dimanche", 1: "lundi", 2: "mardi", 3: "mercredi",
4: "jeudi", 5: "vendredi", 6: "samedi"}
# Définitions fonctions et classes
def avantjj0(Y, M, D):
"""
Vérifie si une date (Y, M, D) se trouve avant le jour julien 0, c'est
à dire avant le 1er janvier - 4712 à 12H (-4712, 1, 1.5).
Entrée :
Y L'année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
booleen : True or False
erreur : si (Y, M, D) n'est pas une date conforme
- voir conforme(Y, M, D) -
"""
[Y, M, D] = conforme(Y, M, D)
if(Y < -4712):
return True
elif(Y == -4712):
return ((M == 1) and (D < 1.5))
else:
return False
def bissextile(Y):
"""
Vérifie si une année Y est bissextile.
Entrée :
Y l’année
Retour :
True
False
Erreur si Y n’est pas convertible en entier
"""
Y = int(Y)
if(Y <= 1582):
return ((Y % 4) == 0)
else:
return (
((Y % 4) == 0)
and
(((Y % 100) != 0) or ((Y % 400) == 0))
)
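# --- Editor's illustrative check (not part of the original module; helper name made up) ---
# After 1582 the Gregorian rule applies (divisible by 4, except centuries not divisible
# by 400); before that the simple Julian rule (divisible by 4) is used.
def _exemple_bissextile():
    assert bissextile(2000)          # divisible by 400
    assert not bissextile(1900)      # divisible by 100 but not by 400
    assert bissextile(1500)          # Julian calendar: divisible by 4 suffices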
def calendrier(Y, M, D):
"""
Retourne le calendrier (Julien ou Gregorien) qui correspond à une date.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
« Julien »
« Grégorien »
Erreur si (Y, M, D) n’est pas une date conforme
- voir conforme(Y, M, D) -
"""
[Y, M, D] = conforme(Y, M, D)
if(Y > 1582):
return "Grégorien"
elif (Y == 1582):
if(M > 10):
return "Grégorien"
elif(M == 10):
if(D < 5):
return "Julien"
else:
return "Grégorien"
else:
return "Julien"
else:
return "Julien"
def conforme(Y, M, D):
"""
Vérifie la conformité de la date proposée.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
[Y, M, D]
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Erreur si :
le mois n’est pas compris entre 1 et 12
le nombre de jours n’est pas correct pour le mois et l’année.
si la date correspond à un jour perdu lors que changement de
calendrier Julien vers Grégorien
"""
# Vérification du format de Y, M et D
Y = int(Y)
M = int(M)
D = float(D)
# Vérification du mois M et calcul du nbr max de jours
if M in [1, 3, 5, 7, 8, 10, 12]:
nbjours = 31
elif M in [4, 6, 9, 11]:
nbjours = 30
elif (M == 2):
if bissextile(Y):
nbjours = 29
else:
nbjours = 28
else:
message = "Le mois doit être compris entre 1 et 12 inclus"
raise ValueError(message)
# Vérification du jour D
if((int(D) > nbjours) or (D < 0)):
message = "Le nombre de jours du mois {}/{}"
message += " doit être compris entre 1 et {}"
message = message.format(M, Y, nbjours)
raise ValueError(message)
# Elimination des jours perdus lors du changement de calendrier Julien
# à Grégorien.
if((Y == 1582) and (M == 10) and (D >= 5) and (D < 15)):
message = "Les jours du 5 au 14 novembre 1582 inclus"
message += " n'appartiennent à aucun calendrier"
raise ValueError(message)
return [Y, M, D]
def coordonnees_moyennes(HR, A):
"""
Calcul approximatif des coordonnées moyennes de l'étoile:
en ascenscion droite
et en déclinaison de
pour l'année A.
Entrée :
HR = numéro de l'étoile
A = Année
Sortie :
dictionnaire :
'calculs' = calculs intermédiaires pour débogage
'RAf (°)' = ascension droite (°) de 0 à 360°
'DEf (°)' = déclinaison (°) de -90 à +90°
"""
calculs = []
resultat = {}
data = etoiles.etoile_data(HR)
calculs.append({'pmRA (mas/y)': data.pmRA})
pmRA = incert.i(data.pmRA)/(15*1000)
calculs.append({'pmRA (s/y)': pmRA})
calculs.append({'pmDE (mas/y)': data.pmDE})
pmDE = incert.i(data.pmDE)/1000
calculs.append({'pmDE ("s/y)': pmDE})
alpha = int(data.RAh) + incert.i(int(data.RAm))/60
alpha += incert.i(data.RAs)/3600
alpha *= 15
calculs.append({'RA0 (°)': alpha})
delta = int(data.DEsgn + data.DEd) + incert.i(int(data.DEm))/60
delta += incert.i(data.DEs)/3600
calculs.append({'DEC0 (°)': delta})
deltaA = incert.it(A - 2000, 0)
calculs.append({'DeltaA (année)': deltaA})
S = deltaA/100
calculs.append({'S (siècle) ': S})
parametres = parametres_precession(S)
m_alpha = parametres['m_alpha (s)']
n_alpha = parametres['n_alpha (s)']
n_delta = parametres['n_delta (")']
calculs.append({'m_alpha (s)': m_alpha})
calculs.append({'n_alpha (s)': n_alpha})
calculs.append({'n_delta (")': n_delta})
precRA = n_alpha*incert.sin(incert.pi*alpha/180)
precRA *= incert.tan(incert.pi*delta/180)
precRA += m_alpha
calculs.append({'precRA (s)': precRA})
precDE = n_delta*incert.cos(incert.pi*alpha/180)
calculs.append({'precDE (")': precDE})
varRA = precRA + pmRA
calculs.append({'varRA (s)': varRA})
varDE = precDE + pmDE
calculs.append({'varDE (")': varDE})
calculs.append({'delta A (an)': deltaA})
varRAtot = varRA*deltaA
calculs.append({'varRAtot (s)': varRAtot})
varDEtot = varDE*deltaA
calculs.append({'varDEtot (")': varDEtot})
RAf = alpha + varRAtot*15/3600
calculs.append({'RAf (°)': RAf})
DEf = delta + varDEtot/3600
calculs.append({'DEf (°)': DEf})
resultat['calculs'] = calculs
resultat['RAf (°)'] = RAf
resultat['DEf (°)'] = DEf
return resultat
def coordonnees_moyennes_rigoureuses(HR, A):
"""
Calcul rigoureux des coordonnées moyennes de l'étoile:
en ascenscion droite
et en déclinaison de
pour l'année A.
Entrée :
HR = numéro de l'étoile
A = Année
Sortie :
dictionnaire :
'calculs' = calculs intermédiaires pour débogage
'RAf (°)' = ascension droite (°) de 0 à 360°
'DEf (°)' = déclinaison (°) de -90 à +90°
"""
calculs = []
resultat = {}
data = etoiles.etoile_data(HR)
pmRA = incert.it(data.pmRA, 0)/1000
calculs.append({'pmRA (as/y)': pmRA})
pmDE = incert.it(data.pmDE, 0)/1000
calculs.append({'pmDE (as/y)': pmDE})
alpha = int(data.RAh) + incert.i(int(data.RAm))/60
alpha += incert.i(data.RAs)/3600
alpha *= 15
calculs.append({'RA0 (°)': alpha})
delta = int(data.DEsgn + data.DEd) + incert.i(int(data.DEm))/60
delta += incert.i(data.DEs)/3600
calculs.append({'DEC0 (°)': delta})
deltaA = incert.it(A - 2000, 0)
calculs.append({'DeltaA (année)': deltaA})
alpha1 = (alpha + pmRA*deltaA/3600)*incert.pi/180
calculs.append({'alpha1 (°)': alpha1})
delta1 = (delta + pmDE*deltaA/3600)*incert.pi/180
calculs.append({'delta1 (°)': delta1})
t = deltaA/100
calculs.append({'t (siècle)': t})
dzeta = (("2306.2181"*t + "0.30188"*t**2 + "0.017998"*t**3)/3600)*incert.pi/180
zed = (("2306.2181"*t + "1.09468"*t**2 + "0.018203"*t**3)/3600)*incert.pi/180
theta = (("2004.3109"*t + "0.42665"*t**2 + "0.041833"*t**3)/3600)*incert.pi/180
A = incert.cos(delta1)*incert.sin(alpha1 + dzeta)
B = incert.cos(theta)*incert.cos(delta1)*incert.cos(alpha1 + dzeta) \
- incert.sin(theta)*incert.sin(delta1)
C = incert.sin(theta)*incert.cos(delta1)*incert.cos(alpha1 + dzeta) \
+ incert.cos(theta)*incert.sin(delta1)
delta = incert.asin(C)
calculs.append({'delta (°)': delta})
alpha = incert.atan2(A, B) + zed
if(alpha.valeur < 0):
alpha += 2*incert.pi
calculs.append({'alpha (°)': alpha})
resultat['calculs'] = calculs
resultat['RAf (°)'] = alpha*180/incert.pi
resultat['DEf (°)'] = delta*180/incert.pi
return resultat
def coordonnees_moyennes2(HR, A):
"""
Calcul les coordonnées moyennes de l'étoile:
en ascenscion droite
et en déclinaison de
pour l'année A première méthode matricielle.
Entrée :
HR = numéro de l'étoile
A = Année
Sortie :
dictionnaire :
'calculs' = calculs intermédiaires pour débogage
'RAf (°)' = ascension droite (°) de 0 à 360°
'DEf (°)' = déclinaison (°) de -90 à +90°
"""
calculs = []
resultat = {}
data = etoiles.etoile_data(HR)
pmRA = incert.it(data.pmRA, 0)/1000
calculs.append({'pmRA (as/y)': pmRA})
pmDE = incert.it(data.pmDE, 0)/1000
calculs.append({'pmDE (as/y)': pmDE})
alpha = int(data.RAh) + incert.i(int(data.RAm))/60
alpha += incert.i(data.RAs)/3600
alpha *= 15
calculs.append({'RA0 (°)': alpha})
delta = int(data.DEsgn + data.DEd) + incert.i(int(data.DEm))/60
delta += incert.i(data.DEs)/3600
calculs.append({'DEC0 (°)': delta})
deltaA = incert.it(A - 2000, 0)
calculs.append({'DeltaA (année)': deltaA})
S = deltaA/100
calculs.append({'S (siècle)': S})
DS = deltaA/1000
calculs.append({'DS (deca siècle)': DS})
RA1 = alpha + pmRA*deltaA/3600
calculs.append({'RA1 (°)': RA1})
DE1 = delta + pmDE*deltaA/3600
calculs.append({'DE1 (°)': DE1})
psi1 = RA1*incert.pi/180
phi1 = DE1*incert.pi/180
U1 = coord.xyzdepolaire(psi1, phi1, incert.un)
calculs.append({'U1 (m)': U1})
parametres = coord.parametres_precession(DS)
calculs += parametres['calculs']
U2 = coord.rotation3(-(parametres['zeta (")']/3600)*(incert.pi/180), U1)
U3 = coord.rotation2((parametres['theta (")']/3600)*(incert.pi/180), U2)
Uf = coord.rotation3(-(parametres['z (")']/3600)*(incert.pi/180), U3)
(psif, phif, rf) = coord.polairedexyz(Uf)
if(psif.valeur < 0):
psif += incert.pi*2
resultat['calculs'] = calculs
resultat['RAf (°)'] = psif*180/incert.pi
resultat['DEf (°)'] = phif*180/incert.pi
return resultat
def coordonnees_moyennes3(HR, A):
"""
Calcul les coordonnées moyennes de l'étoile:
en ascenscion droite
et en déclinaison de
pour l'année A deuxième méthode matricielle.
Entrée :
HR = numéro de l'étoile
A = Année
Sortie :
dictionnaire :
'calculs' = calculs intermédiaires pour débogage
'RAf (°)' = ascension droite (°) de 0 à 360°
'DEf (°)' = déclinaison (°) de -90 à +90°
"""
calculs = []
resultat = {}
data = etoiles.etoile_data(HR)
pmRA = incert.it(data.pmRA, 0)/1000
calculs.append({'pmRA (as/y)': pmRA})
pmDE = incert.it(data.pmDE, 0)/1000
calculs.append({'pmDE (as/y)': pmDE})
alpha = int(data.RAh) + incert.i(int(data.RAm))/60
alpha += incert.i(data.RAs)/3600
alpha *= 15
calculs.append({'RA0 (°)': alpha})
delta = int(data.DEsgn + data.DEd) + incert.i(int(data.DEm))/60
delta += incert.i(data.DEs)/3600
calculs.append({'DEC0 (°)': delta})
deltaA = incert.it(A - 2000, 0)
calculs.append({'DeltaA (année)': deltaA})
S = deltaA/100
calculs.append({'S (siècle)': S})
DS = deltaA/1000
calculs.append({'DS (deca siècle)': DS})
RA1 = alpha + pmRA*deltaA/3600
calculs.append({'RA1 (°)': RA1})
DE1 = delta + pmDE*deltaA/3600
calculs.append({'DE1 (°)': DE1})
# psi1 = (incert.i(360) - RA1)*incert.pi/180
psi1 = RA1*incert.pi/180
phi1 = DE1*incert.pi/180
U1 = coord.xyzdepolaire(psi1, phi1, incert.un)
calculs.append({'U1 (m)': U1})
parametres = coord.parametres_precession(DS)
epsilon0 = incert.i(coord.precession_uai2000_coef['epsilon (")'][0])
calculs += parametres['calculs']
U2 = coord.rotation1((epsilon0/3600)*(incert.pi/180), U1)
U3 = coord.rotation3(-(parametres['psi (")']/3600)*(incert.pi/180), U2)
U4 = coord.rotation1(-(parametres['omega (")']/3600)*(incert.pi/180), U3)
Uf = coord.rotation3((parametres['khi (")']/3600)*(incert.pi/180), U4)
(psif, phif, rf) = coord.polairedexyz(Uf)
if(psif.valeur < 0):
psif += incert.pi*2
resultat['calculs'] = calculs
# resultat['RAf (°)'] = incert.i(360) - psif*180/incert.pi
resultat['RAf (°)'] = psif*180/incert.pi
resultat['DEf (°)'] = phif*180/incert.pi
return resultat
def date(JJ):
"""
Détermine la date du calendrier à partir du jour julien.
La méthode suivante est valable pour les années positives aussi bien
que négatives, mais non pour les jours juliens négatifs.
Entrée :
JJ le jour julien
Retour :
[Y, M, D]
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Erreur si :
JJ n’est pas convertible en float
JJ < 0
"""
JJ = float(JJ)
if(JJ < 0):
message = "Cette méthode n'est pas valable pour les jours juliens"
message += " négatifs"
raise ValueError(message)
JJ += 0.5
Z = int(JJ)
F = JJ - int(JJ)
if (Z < 2299161):
A = Z
else:
alpha = int((Z-1867216.25)/36524.25)
A = Z + 1 + alpha - int(alpha/4)
B = A + 1524
C = int((B-122.1)/365.25)
D = int(365.25*C)
E = int((B - D)/30.6001)
D = B - D - int(30.6001*E) + F
if (E < 14):
M = E - 1
else:
M = E - 13
if (M > 2):
Y = C - 4716
else:
Y = C - 4715
return [Y, M, D]
def dimanchepaques(Y):
"""
Calcul la date du jour de Pâques de l'année Y.
Entrée :
Y l’année
Retour :
[Y, M, D]
Y l’année
M le numéro du mois
D le jour du mois
Erreur si Y n’est pas convertible en entier
"""
Y = int(Y)
if(Y > 1582):
a = Y % 19
b = Y//100
c = Y % 100
d = b//4
e = b % 4
f = (b + 8)//25
g = (b - f + 1)//3
h = (19*a + b - d - g + 15) % 30
i = c//4
k = c % 4
L = (32 + 2*e + 2*i - h - k) % 7
m = (a + 11*h + 22*L)//451
n = (h + L - 7*m + 114)//31
p = (h + L - 7*m + 114) % 31
return [Y, n, p+1]
else:
a = Y % 4
b = Y % 7
c = Y % 19
d = (19*c + 15) % 30
e = (2*a + 4*b - d + 34) % 7
f = (d + e + 114) // 31
g = (d + e + 114) % 31
return [Y, f, g+1]
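# --- Editor's illustrative check (not part of the original module; helper name made up) ---
# Two Gregorian test values: Easter fell on 31 March 1991 and on 23 April 2000.
def _exemple_dimanchepaques():
    assert dimanchepaques(1991) == [1991, 3, 31]
    assert dimanchepaques(2000) == [2000, 4, 23]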
def jourannee(Y, M, D):
"""
Détermine le numéro de jour de l'année correspondant à une date.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
N numéro du jour de l’année
Entre 1 et 365 pour une année régulière
Entre 1 et 366 pour une année bissextile
Erreur si (Y, M, D) n’est pas une date conforme
- voir conforme(Y, M, D) -
"""
[Y, M, D] = conforme(Y, M, D)
if(bissextile(Y)):
K = 1
else:
K = 2
N = int(275*M/9) - K*int((M+9)/12) + int(D) - 30
return N
def jourjulien(Y, M, D):
"""
Détermine la valeur du jour julien qui correspond à une date donnée.
Cette méthode est valable aussi bien pour les années positives que
négatives, mais pas pour des jours juliens négatifs.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
JJ jour julien
Erreur si :
avantjj0(Y, M, D)
(Y, M, D) n’est pas une date conforme
- voir conforme(Y, M, D) –
"""
if avantjj0(Y, M, D):
message = "Méthode non valable pour les jours juliens négatifs"
raise ValueError(message)
calend = calendrier(Y, M, D)
if(M <= 2):
Y -= 1
M += 12
if(calend == "Grégorien"):
A = int(Y/100)
B = 2 - A + int(A/4)
else:
B = 0
return int(365.25*(Y+4716))+int(30.6001*(M+1)) + D + B - 1524.5
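# --- Editor's illustrative check (not part of the original module; helper name made up) ---
# The J2000.0 epoch (2000 January 1.5) corresponds to JD 2451545.0 and
# 1999 January 1.0 to JD 2451179.5; both are reproduced by jourjulien().
def _exemple_jourjulien():
    assert jourjulien(2000, 1, 1.5) == 2451545.0
    assert jourjulien(1999, 1, 1.0) == 2451179.5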
def jourjulien0(Y):
"""
Détermine le jour julien correspondant au 0.0 janvier de l'année Y.
Cette méthode est valable aussi bien pour les années positives que
négatives, mais pas pour des jours juliens négatifs.
Entrée :
Y l’année
Retour :
JJ jour julien
Erreur si :
Y n’est pas convertible en entier
Y < -4711
"""
Y = int(Y)
return jourjulien(Y-1, 12, 31)
def jourjulienmodif(Y, M, D):
"""
Détermine la valeur du jour julien modifié (MDJ) qui correspond
à une date donnée.
MDJ = JJ - 2 400 000,5
L'origine de cette échelle est le 17 novembre 1858 à 0h.
Cette méthode est valable aussi bien pour les années positives que
négatives, mais pas pour des jours juliens négatifs.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
JJ jour julien
Erreur si :
avantjj0(Y, M, D)
(Y, M, D) n’est pas une date conforme
- voir conforme(Y, M, D) –
"""
return jourjulien(Y, M, D) - 2400000.5
def joursemaine(Y, M, D):
"""
Détermine le jour de la semaine correspondant à une date.
Entrée :
Y l’année
M le numéro du mois
D le jour du mois avec éventuellement des décimales de jour
Retour :
Jour de la semaine (lundi, mardi, ….)
Erreur si (Y, M, D) n’est pas une date conforme
- voir conforme(Y, M, D) –
"""
[Y, M, D] = conforme(Y, M, D)
JJ = jourjulien(Y, M, int(D))
return jours_semaine[int(JJ+1.5) % 7]
def parametres_precession(t):
"""
Calcul les paramètres de la précession approximative.
Entrée :
t en siècles juliens à partir de l'époque 2000.0
t = [(JJ) - (JJ)2000.0] / 36525
Retour :
dictionnaire:
m_alpha (s)
n_alpha (s)
n_delta (")
"""
resultat = {}
resultat['m_alpha (s)'] = "3.07496" + "0.00186"*t
resultat['n_alpha (s)'] = "1.33621" - "0.00057"*t
resultat['n_delta (")'] = "20.0431" - "0.0085"*t
return resultat
| 32.903553 | 83 | 0.539803 |
3080a1bcb974dd6a80efc890a746aeb2806018cf | 670 | py | Python | pytorch3d/transforms/__init__.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | ["BSD-3-Clause"] | 2 | 2020-02-08T07:08:45.000Z | 2020-02-19T16:31:06.000Z | pytorch3d/transforms/__init__.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | ["BSD-3-Clause"] | null | null | null | pytorch3d/transforms/__init__.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | ["BSD-3-Clause"] | 1 | 2020-12-12T20:42:33.000Z | 2020-12-12T20:42:33.000Z |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .rotation_conversions import (
euler_angles_to_matrix,
matrix_to_euler_angles,
matrix_to_quaternion,
quaternion_apply,
quaternion_invert,
quaternion_multiply,
quaternion_raw_multiply,
quaternion_to_matrix,
random_quaternions,
random_rotation,
random_rotations,
standardize_quaternion,
)
from .so3 import (
so3_exponential_map,
so3_log_map,
so3_relative_angle,
so3_rotation_angle,
)
from .transform3d import Rotate, RotateAxisAngle, Scale, Transform3d, Translate
__all__ = [k for k in globals().keys() if not k.startswith("_")]
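# --- Editor's illustrative usage sketch (not part of the original __init__) ---
# Round-trip between rotation matrices and quaternions with the symbols re-exported
# above (requires torch and pytorch3d at runtime); kept as comments so that importing
# the package is unaffected.
#
#   import torch
#   from pytorch3d.transforms import (
#       random_rotations, matrix_to_quaternion, quaternion_to_matrix,
#   )
#   R = random_rotations(4)            # (4, 3, 3) rotation matrices
#   q = matrix_to_quaternion(R)        # (4, 4) quaternions, real part first
#   R_back = quaternion_to_matrix(q)   # back to matrices
#   assert torch.allclose(R, R_back, atol=1e-5)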
| 25.769231 | 79 | 0.749254 |
d68a008f08da788d6b6f67f568b9cd909dce6aeb | 16,875 | py | Python | mitmproxy/net/tls.py | asfaltboy/mitmproxy | f245d247674b0d7c7cca327cd2be5a0bcf01b27d | ["MIT"] | 1 | 2020-03-05T19:46:30.000Z | 2020-03-05T19:46:30.000Z | mitmproxy/net/tls.py | asfaltboy/mitmproxy | f245d247674b0d7c7cca327cd2be5a0bcf01b27d | ["MIT"] | 2 | 2021-09-02T02:29:12.000Z | 2021-12-08T08:49:45.000Z | mitmproxy/net/tls.py | asfaltboy/mitmproxy | f245d247674b0d7c7cca327cd2be5a0bcf01b27d | ["MIT"] | null | null | null |
# To enable all SSL methods use: SSLv23
# then add options to disable certain methods
# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
import binascii
import io
import os
import struct
import threading
import typing
from ssl import match_hostname, CertificateError
import certifi
from OpenSSL import SSL
from kaitaistruct import KaitaiStream
import mitmproxy.options # noqa
from mitmproxy import exceptions, certs
from mitmproxy.contrib.kaitaistruct import tls_client_hello
from mitmproxy.net import check
BASIC_OPTIONS = (
SSL.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION
DEFAULT_METHOD = SSL.SSLv23_METHOD
DEFAULT_OPTIONS = (
SSL.OP_NO_SSLv2 |
SSL.OP_NO_SSLv3 |
BASIC_OPTIONS
)
"""
Map a reasonable SSL version specification into the format OpenSSL expects.
Don't ask...
https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
"""
VERSION_CHOICES = {
"all": (SSL.SSLv23_METHOD, BASIC_OPTIONS),
# SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
# TLSv1_METHOD would be TLS 1.0 only
"secure": (DEFAULT_METHOD, DEFAULT_OPTIONS),
"SSLv2": (SSL.SSLv2_METHOD, BASIC_OPTIONS),
"SSLv3": (SSL.SSLv3_METHOD, BASIC_OPTIONS),
"TLSv1": (SSL.TLSv1_METHOD, BASIC_OPTIONS),
"TLSv1_1": (SSL.TLSv1_1_METHOD, BASIC_OPTIONS),
"TLSv1_2": (SSL.TLSv1_2_METHOD, BASIC_OPTIONS),
}
METHOD_NAMES = {
SSL.SSLv2_METHOD: "SSLv2",
SSL.SSLv3_METHOD: "SSLv3",
SSL.SSLv23_METHOD: "SSLv23",
SSL.TLSv1_METHOD: "TLSv1",
SSL.TLSv1_1_METHOD: "TLSv1.1",
SSL.TLSv1_2_METHOD: "TLSv1.2",
}
def client_arguments_from_options(options: "mitmproxy.options.Options") -> dict:
if options.ssl_insecure:
verify = SSL.VERIFY_NONE
else:
verify = SSL.VERIFY_PEER
method, tls_options = VERSION_CHOICES[options.ssl_version_server]
return {
"verify": verify,
"method": method,
"options": tls_options,
"ca_path": options.ssl_verify_upstream_trusted_confdir,
"ca_pemfile": options.ssl_verify_upstream_trusted_ca,
"client_certs": options.client_certs,
"cipher_list": options.ciphers_server,
}
class MasterSecretLogger:
def __init__(self, filename):
self.filename = filename
self.f = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "MasterSecretLogger"
def __call__(self, connection, where, ret):
if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:
with self.lock:
if not self.f:
d = os.path.dirname(self.filename)
if not os.path.isdir(d):
os.makedirs(d)
self.f = open(self.filename, "ab")
self.f.write(b"\r\n")
client_random = binascii.hexlify(connection.client_random())
masterkey = binascii.hexlify(connection.master_key())
self.f.write(b"CLIENT_RANDOM %s %s\r\n" % (client_random, masterkey))
self.f.flush()
def close(self):
with self.lock:
if self.f:
self.f.close()
@staticmethod
def create_logfun(filename):
if filename:
return MasterSecretLogger(filename)
return None
log_master_secret = MasterSecretLogger.create_logfun(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE")
)
def _create_ssl_context(
method: int = DEFAULT_METHOD,
options: int = DEFAULT_OPTIONS,
ca_path: str = None,
ca_pemfile: str = None,
cipher_list: str = None,
alpn_protos: typing.Iterable[bytes] = None,
alpn_select=None,
alpn_select_callback: typing.Callable[[typing.Any, typing.Any], bytes] = None,
verify: int = SSL.VERIFY_PEER,
verify_callback: typing.Optional[
typing.Callable[[SSL.Connection, SSL.X509, int, int, bool], bool]
] = None,
) -> SSL.Context:
"""
Creates an SSL Context.
:param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD
:param options: A bit field consisting of OpenSSL.SSL.OP_* values
:param verify: A bit field consisting of OpenSSL.SSL.VERIFY_* values
:param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
:param ca_pemfile: Path to a PEM formatted trusted CA certificate
:param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html
:rtype : SSL.Context
"""
try:
context = SSL.Context(method)
except ValueError:
method_name = METHOD_NAMES.get(method, "unknown")
raise exceptions.TlsException(
"SSL method \"%s\" is most likely not supported "
"or disabled (for security reasons) in your libssl. "
"Please refer to https://github.com/mitmproxy/mitmproxy/issues/1101 "
"for more details." % method_name
)
# Options (NO_SSLv2/3)
if options is not None:
context.set_options(options)
# Verify Options (NONE/PEER and trusted CAs)
if verify is not None:
context.set_verify(verify, verify_callback)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
try:
context.load_verify_locations(ca_pemfile, ca_path)
except SSL.Error:
raise exceptions.TlsException(
"Cannot load trusted certificates ({}, {}).".format(
ca_pemfile, ca_path
)
)
# Workaround for
# https://github.com/pyca/pyopenssl/issues/190
# https://github.com/mitmproxy/mitmproxy/issues/472
# Options already set before are not cleared.
context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)
# Cipher List
if cipher_list:
try:
context.set_cipher_list(cipher_list.encode())
except SSL.Error as v:
raise exceptions.TlsException("SSL cipher specification error: %s" % str(v))
# SSLKEYLOGFILE
if log_master_secret:
context.set_info_callback(log_master_secret)
if alpn_protos is not None:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
elif alpn_select is not None and alpn_select_callback is None:
# select application layer protocol
def alpn_select_callback(conn_, options):
if alpn_select in options:
return bytes(alpn_select)
else: # pragma: no cover
return options[0]
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is None:
if not callable(alpn_select_callback):
raise exceptions.TlsException("ALPN error: alpn_select_callback must be a function.")
context.set_alpn_select_callback(alpn_select_callback)
elif alpn_select_callback is not None and alpn_select is not None:
raise exceptions.TlsException(
"ALPN error: only define alpn_select (string) OR alpn_select_callback (function).")
return context
def create_client_context(
cert: str = None,
sni: str = None,
address: str = None,
verify: int = SSL.VERIFY_NONE,
**sslctx_kwargs
) -> SSL.Context:
"""
Args:
cert: Path to a file containing both client cert and private key.
sni: Server Name Indication. Required for VERIFY_PEER
address: server address, used for expressive error messages only
verify: A bit field consisting of OpenSSL.SSL.VERIFY_* values
"""
if sni is None and verify != SSL.VERIFY_NONE:
raise exceptions.TlsException("Cannot validate certificate hostname without SNI")
def verify_callback(
conn: SSL.Connection,
x509: SSL.X509,
errno: int,
depth: int,
is_cert_verified: bool
) -> bool:
if is_cert_verified and depth == 0:
# Verify hostname of leaf certificate.
cert = certs.Cert(x509)
try:
crt: typing.Dict[str, typing.Any] = dict(
subjectAltName=[("DNS", x.decode("ascii", "strict")) for x in cert.altnames]
)
if cert.cn:
crt["subject"] = [[["commonName", cert.cn.decode("ascii", "strict")]]]
if sni:
# SNI hostnames allow support of IDN by using ASCII-Compatible Encoding
# Conversion algorithm is in RFC 3490 which is implemented by idna codec
# https://docs.python.org/3/library/codecs.html#text-encodings
# https://tools.ietf.org/html/rfc6066#section-3
# https://tools.ietf.org/html/rfc4985#section-3
hostname = sni.encode("idna").decode("ascii")
else:
hostname = "no-hostname"
match_hostname(crt, hostname)
except (ValueError, CertificateError) as e:
conn.cert_error = exceptions.InvalidCertificateException(
"Certificate verification error for {}: {}".format(
sni or repr(address),
str(e)
)
)
is_cert_verified = False
elif is_cert_verified:
pass
else:
conn.cert_error = exceptions.InvalidCertificateException(
"Certificate verification error for {}: {} (errno: {}, depth: {})".format(
sni,
SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(errno)).decode(),
errno,
depth
)
)
# SSL_VERIFY_NONE: The handshake will be continued regardless of the verification result.
return is_cert_verified
context = _create_ssl_context(
verify=verify,
verify_callback=verify_callback,
**sslctx_kwargs,
)
# Client Certs
if cert:
try:
context.use_privatekey_file(cert)
context.use_certificate_file(cert)
except SSL.Error as v:
raise exceptions.TlsException("SSL client certificate error: %s" % str(v))
return context
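# --- Editor's illustrative usage sketch (not part of the original module) ---
# A minimal verifying client context; the host name and socket are placeholders and
# the resulting context is used with a pyOpenSSL Connection as usual.
#
#   ctx = create_client_context(sni="example.com", address="example.com:443",
#                               verify=SSL.VERIFY_PEER)
#   conn = SSL.Connection(ctx, some_socket)
#   conn.set_connect_state()
#   conn.do_handshake()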
def accept_all(
conn_: SSL.Connection,
x509: SSL.X509,
errno: int,
err_depth: int,
is_cert_verified: bool,
) -> bool:
# Return true to prevent cert verification error
return True
def create_server_context(
cert: typing.Union[certs.Cert, str],
key: SSL.PKey,
handle_sni: typing.Optional[typing.Callable[[SSL.Connection], None]] = None,
request_client_cert: bool = False,
chain_file=None,
dhparams=None,
extra_chain_certs: typing.Iterable[certs.Cert] = None,
**sslctx_kwargs
) -> SSL.Context:
"""
cert: A certs.Cert object or the path to a certificate
chain file.
handle_sni: SNI handler, should take a connection object. Server
name can be retrieved like this:
connection.get_servername()
The request_client_cert argument requires some explanation. We're
supposed to be able to do this with no negative effects - if the
client has no cert to present, we're notified and proceed as usual.
Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
an Android client is asked to present a certificate it does not
have, it hangs up, which is frankly bogus. Some time down the track
we may be able to make the proper behaviour the default again, but
until then we're conservative.
"""
if request_client_cert:
verify = SSL.VERIFY_PEER
else:
verify = SSL.VERIFY_NONE
context = _create_ssl_context(
ca_pemfile=chain_file,
verify=verify,
verify_callback=accept_all,
**sslctx_kwargs,
)
context.use_privatekey(key)
if isinstance(cert, certs.Cert):
context.use_certificate(cert.x509)
else:
context.use_certificate_chain_file(cert)
if extra_chain_certs:
for i in extra_chain_certs:
context.add_extra_chain_cert(i.x509)
if handle_sni:
# SNI callback happens during do_handshake()
context.set_tlsext_servername_callback(handle_sni)
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)
return context
def is_tls_record_magic(d):
"""
Returns:
True, if the passed bytes start with the TLS record magic bytes.
False, otherwise.
"""
d = d[:3]
# TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2
# http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
return (
len(d) == 3 and
d[0] == 0x16 and
d[1] == 0x03 and
0x0 <= d[2] <= 0x03
)
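# A minimal illustrative sketch of the record-magic check above; the helper name is
# hypothetical and it is never called by the module itself.
def _example_is_tls_record_magic():
    # A TLS record starts with content type 0x16 (handshake) and major version 0x03.
    assert is_tls_record_magic(b"\x16\x03\x01\x02\x00")    # TLS 1.0 ClientHello record header
    assert is_tls_record_magic(b"\x16\x03\x03")            # TLS 1.2; only the first three bytes matter
    assert not is_tls_record_magic(b"GET / HTTP/1.1")      # plaintext HTTP is not TLS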
def get_client_hello(rfile):
"""
Peek into the socket and read all records that contain the initial client hello message.
    rfile:
        The read file object of the client connection; it must support ``peek()``.
Returns:
The raw handshake packet bytes, without TLS record header(s).
"""
client_hello = b""
client_hello_size = 1
offset = 0
while len(client_hello) < client_hello_size:
record_header = rfile.peek(offset + 5)[offset:]
if not is_tls_record_magic(record_header) or len(record_header) < 5:
raise exceptions.TlsProtocolException(
'Expected TLS record, got "%s" instead.' % record_header)
record_size = struct.unpack_from("!H", record_header, 3)[0] + 5
record_body = rfile.peek(offset + record_size)[offset + 5:]
if len(record_body) != record_size - 5:
raise exceptions.TlsProtocolException(
"Unexpected EOF in TLS handshake: %s" % record_body)
client_hello += record_body
offset += record_size
client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4
return client_hello
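# Worked example for the peek loop above: a record header of b"\x16\x03\x01\x00\x55"
# yields record_size = 0x0055 + 5 = 90 bytes, of which 85 bytes are handshake payload.
# The first four payload bytes (handshake type plus a 24-bit length) determine
# client_hello_size, and peeking continues across records until the whole message is read.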
class ClientHello:
def __init__(self, raw_client_hello):
self._client_hello = tls_client_hello.TlsClientHello(
KaitaiStream(io.BytesIO(raw_client_hello))
)
@property
def cipher_suites(self):
return self._client_hello.cipher_suites.cipher_suites
@property
def sni(self) -> typing.Optional[bytes]:
if self._client_hello.extensions:
for extension in self._client_hello.extensions.extensions:
is_valid_sni_extension = (
extension.type == 0x00 and
len(extension.body.server_names) == 1 and
extension.body.server_names[0].name_type == 0 and
check.is_valid_host(extension.body.server_names[0].host_name)
)
if is_valid_sni_extension:
return extension.body.server_names[0].host_name
return None
@property
def alpn_protocols(self):
if self._client_hello.extensions:
for extension in self._client_hello.extensions.extensions:
if extension.type == 0x10:
return list(x.name for x in extension.body.alpn_protocols)
return []
@property
def extensions(self) -> typing.List[typing.Tuple[int, bytes]]:
ret = []
if self._client_hello.extensions:
for extension in self._client_hello.extensions.extensions:
body = getattr(extension, "_raw_body", extension.body)
ret.append((extension.type, body))
return ret
@classmethod
def from_file(cls, client_conn) -> "ClientHello":
"""
Peek into the connection, read the initial client hello and parse it to obtain ALPN values.
client_conn:
The :py:class:`client connection <mitmproxy.connections.ClientConnection>`.
Returns:
:py:class:`client hello <mitmproxy.net.tls.ClientHello>`.
"""
try:
raw_client_hello = get_client_hello(client_conn)[4:] # exclude handshake header.
except exceptions.ProtocolException as e:
raise exceptions.TlsProtocolException('Cannot read raw Client Hello: %s' % repr(e))
try:
return cls(raw_client_hello)
except EOFError as e:
raise exceptions.TlsProtocolException(
f"Cannot parse Client Hello: {e!r}, Raw Client Hello: {binascii.hexlify(raw_client_hello)!r}"
)
def __repr__(self):
return f"ClientHello(sni: {self.sni}, alpn_protocols: {self.alpn_protocols})"
| 35.010373
| 116
| 0.630044
|
2bf04020b9639d669e1fef127e70214940105174
| 6,144
|
py
|
Python
|
src/lib/unittest/__init__.py
|
danmed71/skulpt
|
8dd42cf25511307082dabb6341aeaccfb505b120
|
[
"MIT"
] | null | null | null |
src/lib/unittest/__init__.py
|
danmed71/skulpt
|
8dd42cf25511307082dabb6341aeaccfb505b120
|
[
"MIT"
] | null | null | null |
src/lib/unittest/__init__.py
|
danmed71/skulpt
|
8dd42cf25511307082dabb6341aeaccfb505b120
|
[
"MIT"
] | null | null | null |
__author__ = 'bmiller'
'''
This is the start of something that behaves like
the unittest module from cpython.
'''
class TestCase:
def __init__(self):
self.numPassed = 0
self.numFailed = 0
self.assertPassed = 0
self.assertFailed = 0
self.verbose = True
self.tlist = []
testNames = {}
for name in dir(self):
if name[:4] == 'test' and name not in testNames:
self.tlist.append(getattr(self,name))
testNames[name]=True
def setup(self):
pass
def tearDown(self):
pass
def cleanName(self,funcName):
        # work around Skulpt's lack of a __name__ attribute
funcName = str(funcName)
funcName = funcName[13:]
funcName = funcName[:funcName.find('<')-3]
return funcName
def main(self):
for func in self.tlist:
if self.verbose:
print('Running %s' % self.cleanName(func))
try:
self.setup()
self.assertPassed = 0
self.assertFailed = 0
func()
self.tearDown()
if self.assertFailed == 0:
self.numPassed += 1
else:
self.numFailed += 1
print('Tests failed in %s ' % self.cleanName(func))
except Exception as e:
self.assertFailed += 1
self.numFailed += 1
print('Test threw exception in %s (%s)' % (self.cleanName(func), e))
self.showSummary()
def assertEqual(self, actual, expected, feedback=""):
res = actual==expected
self.appendResult(res,str(actual)+' to be equal to ',expected, feedback)
def assertNotEqual(self, actual, expected, feedback=""):
res = actual != expected
self.appendResult(res,str(actual)+' to not equal ',expected,feedback)
def assertTrue(self,x, feedback=""):
res = bool(x) is True
self.appendResult(res,str(x)+' to be ',True,feedback)
def assertFalse(self,x, feedback=""):
res = not bool(x)
self.appendResult(res,str(x)+' to be ',False,feedback)
def assertIs(self,a,b, feedback=""):
res = a is b
self.appendResult(res,str(a)+' to be the same object as ',b,feedback)
def assertIsNot(self,a,b, feedback=""):
res = a is not b
self.appendResult(res,str(a)+' to not be the same object as ',b,feedback)
def assertIsNone(self,x, feedback=""):
res = x is None
self.appendResult(res,x,None,feedback)
def assertIsNotNone(self,x, feedback=""):
res = x is not None
self.appendResult(res,str(x)+' to not be ',None,feedback)
def assertIn(self,a,b, feedback=""):
res = a in b
self.appendResult(res,str(a)+' to be in ',b,feedback)
def assertNotIn(self,a,b, feedback=""):
res = a not in b
self.appendResult(res,str(a)+' to not be in ',b,feedback)
def assertIsInstance(self,a,b, feedback=""):
res = isinstance(a,b)
self.appendResult(res,str(a)+' to be an instance of ',b,feedback)
def assertNotIsInstance(self,a,b, feedback=""):
res = not isinstance(a,b)
self.appendResult(res,str(a)+' to not be an instance of ',b,feedback)
def assertAlmostEqual(self, a, b, places=7, feedback=""):
res = round(a-b, places) == 0
self.appendResult(res,str(a)+' to equal ',b,feedback)
def assertNotAlmostEqual(self, a, b, places=7, feedback=""):
res = round(a-b, places) != 0
self.appendResult(res,str(a)+' to not equal ',b,feedback)
def assertGreater(self,a,b, feedback=""):
res = a > b
self.appendResult(res,str(a)+' to be greater than ',b,feedback)
def assertGreaterEqual(self,a,b, feedback=""):
res = a >= b
self.appendResult(res,str(a)+' to be greater than or equal to ',b,feedback)
def assertLess(self,a,b, feedback=""):
res = a < b
self.appendResult(res,str(a)+' to be less than ',b,feedback)
def assertLessEqual(self,a,b, feedback=""):
res = a <= b
self.appendResult(res,str(a)+' to be less than or equal to ',b,feedback)
def appendResult(self,res,actual,expected,feedback):
if res:
msg = 'Pass'
self.assertPassed += 1
else:
msg = 'Fail: expected %s got %s ' % (str(actual),str(expected)) + feedback
print(msg)
self.assertFailed += 1
def assertRaises(self, exception, callable=None, *args, **kwds):
# with is currently not supported hence we just try and catch
if callable is None:
raise NotImplementedError("assertRaises does currently not support assert contexts")
if kwds:
raise NotImplementedError("assertRaises does currently not support **kwds")
res = False
actualerror = str(exception())
try:
callable(*args)
except exception as ex:
res = True
except Exception as inst:
actualerror = str(inst)
print("ACT = ", actualerror, str(exception()))
else:
actualerror = "No Error"
self.appendResult(res, str(exception()), actualerror, "")
def fail(self, msg=None):
if msg is None:
msg = 'Fail'
else:
msg = 'Fail: ' + msg
print(msg)
self.assertFailed += 1
def showSummary(self):
        total = self.numPassed + self.numFailed
        print("Ran %d tests, passed: %d failed: %d\n" % (total,
                                                         self.numPassed, self.numFailed))
def main(verbose=False, names=None):
glob = globals() # globals() still needs work
    if names is None:
names = glob
for name in names:
if issubclass(glob[name],TestCase):
try:
tc = glob[name]()
tc.verbose = verbose
tc.main()
except:
print("Uncaught Error in: ", name)
| 33.032258
| 96
| 0.558268
|
d55117979232cc6b0baf9cc597f59cd7054539f6
| 4,537
|
py
|
Python
|
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/vendored_sdks/usersfunctions/aio/operations/_usersonenotepagesparentsectionpages_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/vendored_sdks/usersfunctions/aio/operations/_usersonenotepagesparentsectionpages_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/usersfunctions/azext_usersfunctions/vendored_sdks/usersfunctions/aio/operations/_usersonenotepagesparentsectionpages_operations.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class usersonenotepagesparentsectionpagesOperations:
"""usersonenotepagesparentsectionpagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_functions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def preview(
self,
user_id: str,
onenote_page_id: str,
onenote_page_id1: str,
**kwargs
) -> "models.microsoftgraphonenotepagepreview":
"""Invoke function preview.
Invoke function preview.
:param user_id: key: id of user.
:type user_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param onenote_page_id1: key: id of onenotePage.
:type onenote_page_id1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: microsoftgraphonenotepagepreview, or the result of cls(response)
:rtype: ~users_functions.models.microsoftgraphonenotepagepreview
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.microsoftgraphonenotepagepreview"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.preview.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'onenotePage-id1': self._serialize.url("onenote_page_id1", onenote_page_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.odataerror, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('microsoftgraphonenotepagepreview', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
preview.metadata = {'url': '/users/{user-id}/onenote/pages/{onenotePage-id}/parentSection/pages/{onenotePage-id1}/microsoft.graph.preview()'} # type: ignore
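    # Illustrative call pattern (a sketch, not generated code): how the preview operation
    # above is typically awaited from the service client. The client variable and the
    # operation-group attribute name are assumptions; only the preview() signature is
    # taken from the method above.
    #
    #     result = await client.usersonenotepagesparentsectionpages.preview(
    #         user_id="<user-id>",
    #         onenote_page_id="<onenotePage-id>",
    #         onenote_page_id1="<onenotePage-id1>",
    #     )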
| 44.048544
| 161
| 0.677761
|
542377c46ac01a50a3e859b5246f6f51c354dab0
| 18,516
|
py
|
Python
|
mlpf/tfmodel/utils.py
|
luqiang21/particleflow
|
a78ca76fd0b58fce0dc12ca307e3d3fe0be351ef
|
[
"Apache-2.0"
] | null | null | null |
mlpf/tfmodel/utils.py
|
luqiang21/particleflow
|
a78ca76fd0b58fce0dc12ca307e3d3fe0be351ef
|
[
"Apache-2.0"
] | null | null | null |
mlpf/tfmodel/utils.py
|
luqiang21/particleflow
|
a78ca76fd0b58fce0dc12ca307e3d3fe0be351ef
|
[
"Apache-2.0"
] | 2
|
2020-11-23T11:11:27.000Z
|
2021-12-02T20:14:56.000Z
|
import os
import yaml
from pathlib import Path
import datetime
import platform
import random
import glob
import numpy as np
from tqdm import tqdm
import re
import logging
import tensorflow as tf
import tensorflow_addons as tfa
import keras_tuner as kt
from .data import Dataset
from .onecycle_scheduler import OneCycleScheduler, MomentumOneCycleScheduler
#from .datasets import CMSDatasetFactory, DelphesDatasetFactory
def load_config(config_file_path):
with open(config_file_path, "r") as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
return cfg
def parse_config(config, ntrain=None, ntest=None, nepochs=None, weights=None):
config_file_stem = Path(config).stem
config = load_config(config)
tf.config.run_functions_eagerly(config["tensorflow"]["eager"])
n_epochs = config["setup"]["num_epochs"]
if ntrain:
config["setup"]["num_events_train"] = ntrain
if ntest:
config["setup"]["num_events_test"] = ntest
if nepochs:
config["setup"]["num_epochs"] = nepochs
if "multi_output" not in config["setup"]:
config["setup"]["multi_output"] = True
    if weights is not None:
config["setup"]["weights"] = weights
return config, config_file_stem
def create_experiment_dir(prefix=None, suffix=None):
if prefix is None:
train_dir = Path("experiments") / datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
else:
train_dir = Path("experiments") / (prefix + datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f"))
if suffix is not None:
train_dir = train_dir.with_name(train_dir.name + "." + platform.node())
train_dir.mkdir(parents=True)
print("Creating experiment dir {}".format(train_dir))
return str(train_dir)
def get_best_checkpoint(train_dir):
checkpoint_list = list(Path(Path(train_dir) / "weights").glob("weights*.hdf5"))
# Sort the checkpoints according to the loss in their filenames
    checkpoint_list.sort(key=lambda x: float(re.search(r"\d+-\d+\.\d+", str(x))[0].split("-")[-1]))
# Return the checkpoint with smallest loss
return str(checkpoint_list[0])
def delete_all_but_best_checkpoint(train_dir, dry_run):
checkpoint_list = list(Path(Path(train_dir) / "weights").glob("weights*.hdf5"))
# Don't remove the checkpoint with smallest loss
if len(checkpoint_list) == 1:
raise UserWarning("There is only one checkpoint. No deletion was made.")
elif len(checkpoint_list) == 0:
raise UserWarning("Couldn't find any checkpoints. No deletion was made.")
else:
# Sort the checkpoints according to the loss in their filenames
        checkpoint_list.sort(key=lambda x: float(re.search(r"\d+-\d+\.\d+", str(x))[0].split("-")[-1]))
best_ckpt = checkpoint_list.pop(0)
for ckpt in checkpoint_list:
if not dry_run:
ckpt.unlink()
print("Removed all checkpoints in {} except {}".format(train_dir, best_ckpt))
def get_strategy():
    if not os.environ.get("CUDA_VISIBLE_DEVICES"):
gpus = [-1]
print("WARNING: CUDA_VISIBLE_DEVICES variable is empty. \
If you don't have or intend to use GPUs, this message can be ignored.")
else:
gpus = [int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "-1").split(",")]
if gpus[0] == -1:
num_gpus = 0
else:
num_gpus = len(gpus)
print("num_gpus=", num_gpus)
if num_gpus > 1:
strategy = tf.distribute.MirroredStrategy()
elif num_gpus == 1:
strategy = tf.distribute.OneDeviceStrategy("gpu:0")
elif num_gpus == 0:
print("fallback to CPU")
strategy = tf.distribute.OneDeviceStrategy("cpu")
num_gpus = 0
return strategy, num_gpus
def get_lr_schedule(config, steps):
lr = float(config["setup"]["lr"])
callbacks = []
schedule = config["setup"]["lr_schedule"]
if schedule == "onecycle":
onecycle_cfg = config["onecycle"]
lr_schedule = OneCycleScheduler(
lr_max=lr,
steps=steps,
mom_min=onecycle_cfg["mom_min"],
mom_max=onecycle_cfg["mom_max"],
warmup_ratio=onecycle_cfg["warmup_ratio"],
div_factor=onecycle_cfg["div_factor"],
final_div=onecycle_cfg["final_div"],
)
callbacks.append(
MomentumOneCycleScheduler(
steps=steps,
mom_min=onecycle_cfg["mom_min"],
mom_max=onecycle_cfg["mom_max"],
warmup_ratio=onecycle_cfg["warmup_ratio"],
)
)
elif schedule == "exponentialdecay":
if config["exponentialdecay"]["decay_steps"] == "epoch":
decay_steps = int(steps / config["setup"]["num_epochs"])
else:
decay_steps = config["exponentialdecay"]["decay_steps"]
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
lr,
decay_steps=decay_steps,
decay_rate=config["exponentialdecay"]["decay_rate"],
staircase=config["exponentialdecay"]["staircase"],
)
elif schedule == "cosinedecay":
lr_schedule = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=lr,
decay_steps=steps,
)
else:
raise ValueError("Only supported LR schedules are 'exponentialdecay', 'cosinedecay' and 'onecycle'.")
return lr_schedule, callbacks
def get_optimizer(config, lr_schedule=None):
if lr_schedule is None:
lr = float(config["setup"]["lr"])
else:
lr = lr_schedule
if config["setup"]["optimizer"] == "adam":
cfg_adam = config["optimizer"]["adam"]
return tf.keras.optimizers.Adam(learning_rate=lr, amsgrad=cfg_adam["amsgrad"])
if config["setup"]["optimizer"] == "adamw":
cfg_adamw = config["optimizer"]["adamw"]
return tfa.optimizers.AdamW(learning_rate=lr, weight_decay=cfg_adamw["weight_decay"], amsgrad=cfg_adamw["amsgrad"])
elif config["setup"]["optimizer"] == "sgd":
cfg_sgd = config["optimizer"]["sgd"]
return tf.keras.optimizers.SGD(learning_rate=lr, momentum=cfg_sgd["momentum"], nesterov=cfg_sgd["nesterov"])
else:
raise ValueError("Only 'adam', 'adamw' and 'sgd' are supported optimizers, got {}".format(config["setup"]["optimizer"]))
def get_tuner(cfg_hypertune, model_builder, outdir, recreate, strategy):
if cfg_hypertune["algorithm"] == "random":
print("Keras Tuner: Using RandomSearch")
cfg_rand = cfg_hypertune["random"]
return kt.RandomSearch(
model_builder,
objective=cfg_rand["objective"],
max_trials=cfg_rand["max_trials"],
project_name=outdir,
overwrite=recreate,
)
elif cfg_hypertune["algorithm"] == "bayesian":
print("Keras Tuner: Using BayesianOptimization")
cfg_bayes = cfg_hypertune["bayesian"]
return kt.BayesianOptimization(
model_builder,
objective=cfg_bayes["objective"],
max_trials=cfg_bayes["max_trials"],
num_initial_points=cfg_bayes["num_initial_points"],
project_name=outdir,
overwrite=recreate,
)
elif cfg_hypertune["algorithm"] == "hyperband":
print("Keras Tuner: Using Hyperband")
cfg_hb = cfg_hypertune["hyperband"]
return kt.Hyperband(
model_builder,
objective=cfg_hb["objective"],
max_epochs=cfg_hb["max_epochs"],
factor=cfg_hb["factor"],
hyperband_iterations=cfg_hb["iterations"],
directory=outdir + "/tb",
project_name="mlpf",
overwrite=recreate,
executions_per_trial=cfg_hb["executions_per_trial"],
distribution_strategy=strategy,
)
def compute_weights_invsqrt(X, y, w):
wn = tf.cast(tf.shape(w)[-1], tf.float32) / tf.sqrt(w)
wn *= tf.cast(X[:, 0] != 0, tf.float32)
# wn /= tf.reduce_sum(wn)
return X, y, wn
def compute_weights_none(X, y, w):
wn = tf.ones_like(w)
wn *= tf.cast(X[:, 0] != 0, tf.float32)
return X, y, wn
def make_weight_function(config):
def weight_func(X,y,w):
w_signal_only = tf.where(y[:, 0]==0, 0.0, 1.0)
w_signal_only *= tf.cast(X[:, 0]!=0, tf.float32)
w_none = tf.ones_like(w)
w_none *= tf.cast(X[:, 0]!=0, tf.float32)
w_invsqrt = tf.cast(tf.shape(w)[-1], tf.float32)/tf.sqrt(w)
w_invsqrt *= tf.cast(X[:, 0]!=0, tf.float32)
w_signal_only_invsqrt = tf.where(y[:, 0]==0, 0.0, tf.cast(tf.shape(w)[-1], tf.float32)/tf.sqrt(w))
w_signal_only_invsqrt *= tf.cast(X[:, 0]!=0, tf.float32)
weight_d = {
"none": w_none,
"signal_only": w_signal_only,
"signal_only_inverse_sqrt": w_signal_only_invsqrt,
"inverse_sqrt": w_invsqrt
}
ret_w = {}
for loss_component, weight_type in config["sample_weights"].items():
ret_w[loss_component] = weight_d[weight_type]
return X,y,ret_w
return weight_func
def targets_multi_output(num_output_classes):
def func(X, y, w):
msk = tf.expand_dims(tf.cast(y[:, :, 0]!=0, tf.float32), axis=-1)
return (
X,
{
"cls": tf.one_hot(tf.cast(y[:, :, 0], tf.int32), num_output_classes),
"charge": y[:, :, 1:2]*msk,
"pt": y[:, :, 2:3]*msk,
"eta": y[:, :, 3:4]*msk,
"sin_phi": y[:, :, 4:5]*msk,
"cos_phi": y[:, :, 5:6]*msk,
"energy": y[:, :, 6:7]*msk,
},
w,
)
return func
def get_dataset_def(config):
cds = config["dataset"]
return Dataset(
num_input_features=int(cds["num_input_features"]),
num_output_features=int(cds["num_output_features"]),
padded_num_elem_size=int(cds["padded_num_elem_size"]),
schema=cds["schema"],
)
def get_train_val_datasets(config, global_batch_size, n_train, n_test, repeat=True):
dataset_def = get_dataset_def(config)
tfr_files = sorted(glob.glob(dataset_def.processed_path))
if len(tfr_files) == 0:
raise Exception("Could not find any files in {}".format(dataset_def.processed_path))
random.shuffle(tfr_files)
dataset = tf.data.TFRecordDataset(tfr_files).map(
dataset_def.parse_tfr_element, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# Due to TFRecords format, the length of the dataset is not known beforehand
num_events = 0
for _ in dataset:
num_events += 1
print("dataset loaded, len={}".format(num_events))
weight_func = make_weight_function(config)
assert n_train + n_test <= num_events
# Padded shapes
ps = (
tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_input_features]),
tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_output_features]),
{
"cls": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"charge": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"energy": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"pt": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"eta": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"sin_phi": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
"cos_phi": tf.TensorShape([dataset_def.padded_num_elem_size, ]),
}
)
ds_train = dataset.take(n_train).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)
ds_test = dataset.skip(n_train).take(n_test).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)
if config["setup"]["multi_output"]:
dataset_transform = targets_multi_output(config["dataset"]["num_output_classes"])
ds_train = ds_train.map(dataset_transform)
ds_test = ds_test.map(dataset_transform)
else:
dataset_transform = None
return ds_train, ds_test, dataset_transform
def prepare_val_data(config, dataset_def, single_file=False):
if single_file:
val_filelist = dataset_def.val_filelist[:1]
else:
val_filelist = dataset_def.val_filelist
if config["setup"]["num_val_files"] > 0:
val_filelist = val_filelist[: config["setup"]["num_val_files"]]
Xs = []
ygens = []
ycands = []
for fi in tqdm(val_filelist, desc="Preparing validation data"):
X, ygen, ycand = dataset_def.prepare_data(fi)
Xs.append(np.concatenate(X))
ygens.append(np.concatenate(ygen))
ycands.append(np.concatenate(ycand))
assert len(Xs) > 0, "Xs is empty"
X_val = np.concatenate(Xs)
ygen_val = np.concatenate(ygens)
ycand_val = np.concatenate(ycands)
return X_val, ygen_val, ycand_val
def get_heptfds_dataset(dataset_name, config, num_gpus, split, num_events=None):
cds = config["dataset"]
if cds['schema'] == "cms":
dsf = CMSDatasetFactory(config)
elif cds['schema'] == "delphes":
dsf = DelphesDatasetFactory(config)
else:
raise ValueError("Only supported datasets are 'cms' and 'delphes'.")
ds, ds_info = dsf.get_dataset(dataset_name, config["datasets"][dataset_name], split)
#bs = config["datasets"][dataset_name]["batch_per_gpu"]
    if num_events is not None:
ds = ds.take(num_events)
#ds = ds.batch(bs)
ds = ds.map(dsf.get_map_to_supervised())
return ds, ds_info
def load_and_interleave(dataset_names, config, num_gpus, split, batch_size):
datasets = []
steps = []
for ds_name in dataset_names:
ds, _ = get_heptfds_dataset(ds_name, config, num_gpus, split)
num_steps = 0
for elem in ds:
num_steps += 1
print("Loaded {}:{} with {} steps".format(ds_name, split, num_steps))
datasets.append(ds)
steps.append(num_steps)
#Now interleave elements from the datasets randomly
ids = 0
indices = []
for ds, num_steps in zip(datasets, steps):
indices += num_steps*[ids]
ids += 1
indices = np.array(indices, np.int64)
np.random.shuffle(indices)
choice_dataset = tf.data.Dataset.from_tensor_slices(indices)
ds = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
bs = batch_size
if num_gpus>1:
bs = bs*num_gpus
ds = ds.batch(bs)
return ds
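# Worked example of the interleaving above: with two datasets of 3 and 2 steps, indices
# starts as [0, 0, 0, 1, 1]; after shuffling (e.g. [1, 0, 0, 1, 0]) choose_from_datasets()
# draws batches from dataset 1, 0, 0, 1, 0 in that order, so every step of every dataset
# is visited exactly once, in random order.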
#Load multiple datasets and mix them together
def get_datasets(datasets_to_interleave, config, num_gpus, split):
datasets = []
steps = []
for joint_dataset_name in datasets_to_interleave.keys():
ds_conf = datasets_to_interleave[joint_dataset_name]
if ds_conf["datasets"] is None:
logging.warning("No datasets in {} list.".format(joint_dataset_name))
else:
interleaved_ds = load_and_interleave(ds_conf["datasets"], config, num_gpus, split, ds_conf["batch_per_gpu"])
num_steps = 0
for elem in interleaved_ds:
num_steps += 1
print("Interleaved joint dataset {} with {} steps".format(joint_dataset_name, num_steps))
datasets.append(interleaved_ds)
steps.append(num_steps)
ids = 0
indices = []
for ds, num_steps in zip(datasets, steps):
indices += num_steps*[ids]
ids += 1
indices = np.array(indices, np.int64)
np.random.shuffle(indices)
choice_dataset = tf.data.Dataset.from_tensor_slices(indices)
ds = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
num_steps = 0
for elem in ds:
num_steps += 1
print("Final dataset with {} steps".format(num_steps))
return ds, num_steps
def set_config_loss(config, trainable):
if trainable == "classification":
config["dataset"]["pt_loss_coef"] = 0.0
config["dataset"]["eta_loss_coef"] = 0.0
config["dataset"]["sin_phi_loss_coef"] = 0.0
config["dataset"]["cos_phi_loss_coef"] = 0.0
config["dataset"]["energy_loss_coef"] = 0.0
elif trainable == "regression":
config["dataset"]["classification_loss_coef"] = 0.0
config["dataset"]["charge_loss_coef"] = 0.0
config["dataset"]["pt_loss_coef"] = 0.0
config["dataset"]["eta_loss_coef"] = 0.0
config["dataset"]["sin_phi_loss_coef"] = 0.0
config["dataset"]["cos_phi_loss_coef"] = 0.0
elif trainable == "all":
pass
return config
def get_class_loss(config):
if config["setup"]["classification_loss_type"] == "categorical_cross_entropy":
cls_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False, label_smoothing=config["setup"].get("classification_label_smoothing", 0.0))
elif config["setup"]["classification_loss_type"] == "sigmoid_focal_crossentropy":
cls_loss = tfa.losses.sigmoid_focal_crossentropy
else:
raise KeyError("Unknown classification loss type: {}".format(config["setup"]["classification_loss_type"]))
return cls_loss
def get_loss_from_params(input_dict):
input_dict = input_dict.copy()
loss_type = input_dict.pop("type")
loss_cls = getattr(tf.keras.losses, loss_type)
return loss_cls(**input_dict)
def get_loss_dict(config):
cls_loss = get_class_loss(config)
default_loss = {"type": "MeanSquaredError"}
loss_dict = {
"cls": cls_loss,
"charge": get_loss_from_params(config["dataset"].get("charge_loss", default_loss)),
"pt": get_loss_from_params(config["dataset"].get("pt_loss", default_loss)),
"eta": get_loss_from_params(config["dataset"].get("eta_loss", default_loss)),
"sin_phi": get_loss_from_params(config["dataset"].get("sin_phi_loss", default_loss)),
"cos_phi": get_loss_from_params(config["dataset"].get("cos_phi_loss", default_loss)),
"energy": get_loss_from_params(config["dataset"].get("energy_loss", default_loss)),
}
loss_weights = {
"cls": config["dataset"]["classification_loss_coef"],
"charge": config["dataset"]["charge_loss_coef"],
"pt": config["dataset"]["pt_loss_coef"],
"eta": config["dataset"]["eta_loss_coef"],
"sin_phi": config["dataset"]["sin_phi_loss_coef"],
"cos_phi": config["dataset"]["cos_phi_loss_coef"],
"energy": config["dataset"]["energy_loss_coef"],
}
return loss_dict, loss_weights
| 36.37721
| 153
| 0.640311
|
2c004bfc253190c94fa11b24f7cff3f8a7d5e1fe
| 295
|
py
|
Python
|
examples/plot_sequence_distribution.py
|
pysan-dev/pysan_classic
|
27f6133bc0c5d299376e8d181fc7fcdee73bc657
|
[
"BSD-3-Clause"
] | 1
|
2021-09-12T09:46:55.000Z
|
2021-09-12T09:46:55.000Z
|
examples/plot_sequence_distribution.py
|
pysan-dev/pysan_classic
|
27f6133bc0c5d299376e8d181fc7fcdee73bc657
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_sequence_distribution.py
|
pysan-dev/pysan_classic
|
27f6133bc0c5d299376e8d181fc7fcdee73bc657
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Create a sequence distribution plot
==================================================
Objective
------------
write objective here
Solution
------------
write solution here
"""
import pysan as ps
sequence = [1,1,1,2,2,3,2,2,3,3,2,1,1,2,3,3,3,2,2,2,3,2,1,1]
ps.plot_sequence(sequence)
| 14.047619
| 60
| 0.518644
|
ba24a66e1e601cf490c65ffadf41d1e3029733c1
| 12,137
|
py
|
Python
|
fhir/lung-cancer/inference/main.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | 310
|
2018-02-23T01:40:01.000Z
|
2022-03-30T12:25:56.000Z
|
fhir/lung-cancer/inference/main.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | 189
|
2018-06-19T15:32:10.000Z
|
2022-03-11T23:48:14.000Z
|
fhir/lung-cancer/inference/main.py
|
animesh/healthcare
|
7d3d4dc9deb3d31eab99035780ccb9a44f00b687
|
[
"Apache-2.0"
] | 165
|
2018-03-06T19:29:18.000Z
|
2022-03-21T10:53:45.000Z
|
#!/usr/bin/python3
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Lung cancer Cloud Function.
Cloud Functions implementation which takes a patient bundle from a FHIR
Store whenever a Patient, Observation or Condition is changed, runs prediction
against a pre-trained model and writes the results back to the same FHIR Store.
"""
import base64
import datetime
import json
import logging
import os
import googleapiclient.discovery
from shared import features
from shared import utils
import google.auth
from google.auth.transport.urllib3 import AuthorizedHttp
# These should be passed in through deployment.
MODEL = os.environ.get('MODEL')
VERSION = os.environ.get('VERSION')
FHIR_STORE_ENDPOINT_PREFIX = 'https://healthcare.googleapis.com/v1beta1'
CREATE_RESOURCE_ACTION = 'CreateResource'
UPDATE_RESOURCE_ACTION = 'UpdateResource'
RISKS = ['negligible', 'low', 'moderate', 'high']
LOGGER = logging.getLogger('main')
def get_resource(http, resource_name):
"""Fetches a resource from the FHIR Store.
Args:
http (google.auth.transport.urllib3.AuthorizedHttp):
resource_name (str): the name of the resource, e.g. 'projects/my-project
/locations/us-central1/datasets/my-dataset/fhirStores/my-store
/fhir/Patient/patient-id'
Returns:
Object: the resource loaded from the FHIR Store.
"""
response = http.request('GET', format_url(resource_name))
if response.status > 299:
LOGGER.critical('Failed to retrieve resource %s, response: %s',
resource_name, response.data)
return None
return json.loads(response.data)
def build_risk_assessment(pid, risk, rid=None):
"""Builds a risk assessment JSON object.
Args:
pid (str): the patient ID for this risk assessment.
risk (float): the predicted risk of lung cancer.
rid (str): a previous risk assessment's ID that this one will overwrite.
Returns:
str: JSON representation of a RiskAssessment resource.
"""
risk_assessment = {
'resourceType':
utils.RISKASSESSMENT_TYPE,
'basis': [{
'reference': pid
}],
'status':
'final',
'subject': {
'reference': pid
},
'occurrenceDateTime':
datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'prediction': [{
'outcome': {
'coding': [{
'system': utils.SNOMED_SYSTEM,
'code': '162573006',
'display': 'Suspected lung cancer (situation)',
}],
'text': 'Suspected lung cancer (situation)',
},
'qualitativeRisk': {
'coding': [{
'system': 'http://hl7.org/fhir/risk-probability',
'code': risk
}]
}
}]
}
if rid is not None:
risk_assessment['id'] = rid
return json.dumps(risk_assessment)
def get_action(data):
"""Reads operation action (e.g. Create or Update) from pubsub message."""
if data['attributes'] is not None:
return data['attributes']['action']
return None
def format_url(path, query=None):
"""Formats request URL with path and query string."""
if query is None:
return '{}/{}'.format(FHIR_STORE_ENDPOINT_PREFIX, path)
else:
return '{}/{}?{}'.format(FHIR_STORE_ENDPOINT_PREFIX, path, query)
def create_or_update_resource(http, path, payload):
"""Writes a resource to the FHIR Store.
Args:
http (google.auth.transport.urllib3.AuthorizedHttp):
path (str): path to the endpoint, e.g. 'projects/my-project
/locations/us-central1/datasets/my-dataset/fhirStores/my-store
/fhir/Patient' for create requests and 'projects/my-project
/locations/us-central1/datasets/my-dataset/fhirStores/my-store
/fhir/Patient/patient-id' for update requests.
payload (str): resource to be written to the FHIR Store.
Returns:
Object: the resource from the server, usually this is an
OperationOutcome resource if there is anything wrong.
"""
# Determine which HTTP method we need to use: POST for create, and PUT for
# update. The path of update requests have one more component than create
# requests.
method = 'POST' if path.count('/') == 9 else 'PUT'
response = http.request(
method,
format_url(path),
body=payload,
headers={'Content-Type': 'application/fhir+json;charset=utf-8'})
if response.status > 299:
LOGGER.error('Failed to create or update resource %s, response: %s',
payload, response.data)
return None
return json.loads(response.data)
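# Worked example of the POST/PUT choice above: a create path such as
# 'projects/p/locations/l/datasets/d/fhirStores/s/fhir/Patient' contains 9 slashes and is
# POSTed, while an update path with a trailing '/<resource-id>' contains 10 and is PUT.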
def search_resource(http, path, query):
"""Searches a resource in the FHIR Store.
Args:
http (google.auth.transport.urllib3.AuthorizedHttp):
path (str): path to the search endpoint, e.g. 'projects/my-project
/locations/us-central1/datasets/my-dataset/fhirStores/my-store
/fhir/Patient'
query (str): query parameter, e.g. 'age=gt30'
Returns:
List[dict]: a list of resources matching the search criteria.
"""
response = http.request('GET', format_url(path, query=query))
if response.status > 299:
LOGGER.error('Failed to search resource %s, response: %s', query,
response.data)
return None
bundle = json.loads(response.data)
return [e['resource'] for e in bundle.get('entry', [])]
def filter_resource(resources):
"""Finds a RiskAssessment.
A patient may have multiple risk assessments, so we filter them to find the
one about this specified disease
Args:
resources (List[Object]): the lung cancer risk assessment or None.
"""
return next(
(res for res in resources
if utils.extract_disease(res) == 'Suspected lung cancer (situation)'),
None)
def predict(example):
"""Sends features to Cloud ML Engine for online prediction.
Args:
example (dict): features to be fed into the model for prediction.
Returns:
Mapping[str: any]: dictionary of prediction results defined by the model.
"""
service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)
name = 'projects/{p}/models/{m}/versions/{v}'.format(
p=os.environ.get('GCP_PROJECT'),
m=MODEL,
v=VERSION,
)
del example['has_cancer']
response = service.projects().predict(
name=name, body={
'instances': [example],
}).execute()
if 'error' in response:
LOGGER.error('Prediction failed: %s', response['error'])
return None
return response['predictions']
def get_patient_everything(http, patient_name):
response = http.request(
'GET', '{}/{}/$everything'.format(FHIR_STORE_ENDPOINT_PREFIX,
patient_name))
if response.status > 299:
LOGGER.critical('Failed to retrieve resource %s, response: %s',
patient_name, response.data)
return None
return json.loads(response.data)
def create_or_update_risk_assessment(http, patient_name, predictions, action):
"""Creates or updates a risk assessment (if one already exists for the given patient)."""
scores = predictions[0]['probabilities']
LOGGER.info('Prediction results: %s', scores)
# Last element represents risk.
score = scores[1]
risk = RISKS[min(int(score / 0.015), len(RISKS) - 1)]
project_id, location, dataset_id, fhir_store_id, pid = _parse_resource_name(
patient_name)
path = _construct_resource_name(project_id, location, dataset_id,
fhir_store_id, utils.RISKASSESSMENT_TYPE)
if action == UPDATE_RESOURCE_ACTION:
resources = search_resource(http, path, 'subject=' + pid)
res = filter_resource(resources)
if res is None:
LOGGER.info('No existing RiskAssessment, creating a new one...')
create_or_update_resource(http, path, build_risk_assessment(pid, risk))
return
rid = res['id']
path = _construct_resource_name(
project_id, location, dataset_id, fhir_store_id,
'{}/{}'.format(utils.RISKASSESSMENT_TYPE, rid))
create_or_update_resource(http, path,
build_risk_assessment(pid, risk, rid=rid))
elif action == CREATE_RESOURCE_ACTION:
create_or_update_resource(http, path, build_risk_assessment(pid, risk))
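# Worked example of the risk bucketing in create_or_update_risk_assessment() above, with
# RISKS = ['negligible', 'low', 'moderate', 'high'] and 0.015-wide buckets:
#   score = 0.010 -> int(0.010 / 0.015) = 0 -> 'negligible'
#   score = 0.020 -> int(0.020 / 0.015) = 1 -> 'low'
#   score = 0.032 -> int(0.032 / 0.015) = 2 -> 'moderate'
#   score >= 0.045 -> index clamped to len(RISKS) - 1 = 3 -> 'high'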
def get_corresponding_patient(http, resource_name, resource):
"""Gets the patient referenced by resource, or just returns resource if it's a patient."""
if resource['resourceType'] == utils.PATIENT_TYPE:
return resource
  if resource['resourceType'] in (utils.CONDITION_TYPE, utils.OBSERVATION_TYPE):
    ref = resource['subject']['reference']
  else:
    # Any other resource type carries no patient reference we can resolve.
    return None
  if utils.PATIENT_TYPE not in ref:
    return None
project_id, location, dataset_id, fhir_store_id, _ = _parse_resource_name(
resource_name)
patient_name = _construct_resource_name(project_id, location, dataset_id,
fhir_store_id, ref)
return get_resource(http, patient_name)
# pylint: disable=unused-argument
def main(data, context):
"""Extracts features from a patient bundle for online prediction.
This process is broken down into a few steps:
1. Fetch the Resource we get triggered on, and fetch/extract the patient that
it is related to.
2. Fetch everything for the patient from step 1, and extract the
features we are interested in.
3. Send the features to Cloud ML for online prediction, and write the
results back to the FHIR store.
Args:
data (dict): Cloud PubSub payload. The `data` field is what we are looking
for.
context (google.cloud.functions.Context): Metadata for the event.
"""
if 'data' not in data:
LOGGER.info('`data` field is not present, skipping...')
return
resource_name = base64.b64decode(data['data']).decode('utf-8')
if (utils.CONDITION_TYPE not in resource_name and
utils.PATIENT_TYPE not in resource_name and
utils.OBSERVATION_TYPE not in resource_name):
LOGGER.info('Skipping resource %s which is irrelevant for prediction.',
resource_name)
return
credentials, _ = google.auth.default()
http = AuthorizedHttp(credentials)
resource = get_resource(http, resource_name)
if resource is None:
return
patient = get_corresponding_patient(http, resource_name, resource)
if patient is None:
LOGGER.error('Could not find corresponding patient in resource %s',
resource_name)
return
project_id, location, dataset_id, fhir_store_id, _ = _parse_resource_name(
resource_name)
patient_id = 'Patient/{}'.format(patient['id'])
patient_name = _construct_resource_name(project_id, location, dataset_id,
fhir_store_id, patient_id)
patient_bundle = get_patient_everything(http, patient_name)
if patient_bundle is None:
return
predictions = predict(features.build_example(patient_bundle))
if predictions is None:
return
action = get_action(data)
create_or_update_risk_assessment(http, patient_name, predictions, action)
def _parse_resource_name(name):
"""Extracts project id, location, dataset id etc from the resource name."""
parts = name.split('/')
return parts[1], parts[3], parts[5], parts[7], '{}/{}'.format(
parts[9], parts[10])
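# Example: for 'projects/p/locations/us-central1/datasets/d/fhirStores/s/fhir/Patient/123'
# the function returns ('p', 'us-central1', 'd', 's', 'Patient/123').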
def _construct_resource_name(project_id, location, dataset_id, fhir_store_id,
resource_id):
"""Constructs a resource name."""
return '/'.join([
'projects', project_id, 'locations', location, 'datasets', dataset_id,
'fhirStores', fhir_store_id, 'fhir', resource_id
])
| 32.980978
| 92
| 0.681552
|
93244c8e8eaef4e4fc43d8454d2947ad58fd8cec
| 929
|
py
|
Python
|
series/migrations/0007_auto_20191108_1134.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | null | null | null |
series/migrations/0007_auto_20191108_1134.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | 9
|
2020-06-05T23:26:12.000Z
|
2021-06-17T20:23:14.000Z
|
series/migrations/0007_auto_20191108_1134.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-08 09:34
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('series', '0006_auto_20191108_1130'),
]
operations = [
migrations.AlterField(
model_name='seriespage',
name='content',
field=wagtail.core.fields.StreamField([('related_series', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(help_text='Title for the promoted series block', required=False)), ('disable_info', wagtail.core.blocks.BooleanBlock(help_text='Disable the display of series info (visible as default)', required=False)), ('series', wagtail.core.blocks.ListBlock(wagtail.core.blocks.PageChooserBlock(can_choose_root=False, label='Series', page_type=['series.SeriesPage'], required=False)))]))], blank=True, null=True),
),
]
| 44.238095
| 543
| 0.710441
|
7c7952b68dd75ae1d37c6909ac182fcfc8f2efd0
| 1,010
|
py
|
Python
|
isi_sdk_8_0/test/test_directory_query_scope_conditions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0/test/test_directory_query_scope_conditions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0/test/test_directory_query_scope_conditions.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.directory_query_scope_conditions import DirectoryQueryScopeConditions # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestDirectoryQueryScopeConditions(unittest.TestCase):
"""DirectoryQueryScopeConditions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDirectoryQueryScopeConditions(self):
"""Test DirectoryQueryScopeConditions"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0.models.directory_query_scope_conditions.DirectoryQueryScopeConditions() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.634146
| 115
| 0.736634
|
17a8304ef845e9cc03cb7439986746111e5622ac
| 3,499
|
py
|
Python
|
setup.py
|
wholesomegarden/WhatsappReminder
|
c6814c28b78738e36f2a82df65c4bc6714ce68da
|
[
"MIT"
] | 1
|
2021-05-04T10:19:51.000Z
|
2021-05-04T10:19:51.000Z
|
setup.py
|
wholesomegarden/WhatsappReminder
|
c6814c28b78738e36f2a82df65c4bc6714ce68da
|
[
"MIT"
] | null | null | null |
setup.py
|
wholesomegarden/WhatsappReminder
|
c6814c28b78738e36f2a82df65c4bc6714ce68da
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import ast
# To use a consistent encoding
from codecs import open
import os
# Always prefer setuptools over distutils
from setuptools import setup
PACKAGE_NAME = "webwhatsapi"
path = os.path.join(os.path.dirname(__file__), PACKAGE_NAME, "__init__.py")
with open(path, "r") as file:
t = compile(file.read(), path, "exec", ast.PyCF_ONLY_AST)
for node in (n for n in t.body if isinstance(n, ast.Assign)):
if len(node.targets) != 1:
continue
name = node.targets[0]
if not isinstance(name, ast.Name) or name.id not in (
"__version__",
"__version_info__",
"VERSION",
):
continue
v = node.value
if isinstance(v, ast.Str):
version = v.s
break
if isinstance(v, ast.Tuple):
r = []
for e in v.elts:
if isinstance(e, ast.Str):
r.append(e.s)
elif isinstance(e, ast.Num):
r.append(str(e.n))
version = ".".join(r)
break
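# Illustration of what the AST walk above extracts (the concrete value is hypothetical):
# a line such as __version__ = "2.0.5" or __version_info__ = (2, 0, 5) inside
# webwhatsapi/__init__.py yields version == "2.0.5".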
# Get the long description from the README file
with open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="webwhatsapi",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description="A python interface for Whatsapp Web",
long_description=long_description,
# The project's main homepage.
url="https://github.com/mukulhase/WhatsAPI",
download_url="https://github.com/mukulhase/WhatsAPI/archive/{}.tar.gz".format(
version
),
# Author details
author="Mukul Hase",
author_email="mukulhase@gmail.com",
include_package_data=True,
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Topic :: Communications :: Chat",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
],
# What does your project relate to?
keywords="Whatsapp Chat Bot Chatbot Selenium Web Whatsapp API",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=[PACKAGE_NAME],
install_requires=[
# 'aiohttp', see https://github.com/mukulhase/WebWhatsAPI/issues/159
"python-dateutil>=2.6.0",
"selenium>=3.4.3",
"six>=1.10.0",
"python-axolotl",
"cryptography",
"python-magic",
"timg",
"scikit-image", "pyzbar", "termcolor", 'pyzbar'
],
extras_require={},
)
| 32.700935
| 87
| 0.614175
|
e9a279c180121cd8181ad75f27df46b8cccff288
| 9,784
|
py
|
Python
|
chemlearning_data/gaussian_job.py
|
chemistry-scripts/chemlearning_data
|
f57425c063b9cfeccfc7f5e5d356bd3906b2a0db
|
[
"MIT"
] | null | null | null |
chemlearning_data/gaussian_job.py
|
chemistry-scripts/chemlearning_data
|
f57425c063b9cfeccfc7f5e5d356bd3906b2a0db
|
[
"MIT"
] | null | null | null |
chemlearning_data/gaussian_job.py
|
chemistry-scripts/chemlearning_data
|
f57425c063b9cfeccfc7f5e5d356bd3906b2a0db
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019, E. Nicolas
"""Gaussian Job class to start job, run it and analyze it"""
import logging
import os
import shutil
from cclib.io import ccread
from cclib.parser.utils import PeriodicTable
class GaussianJob:
"""
Class that can be used as a container for Gaussian jobs.
Attributes:
- basedir (base directory, os.path object)
- name (name of computation, string)
- coordinates (list of XYZ coordinates)
- job_id (unique identifier, int)
- natoms (number of atoms, int)
- path (path in which to run current computation, os.path object)
- filenames (dict with input, (file_name.com, str)
output, (file_name.log, str)
)
- input_script (input file, list of strings)
"""
def __init__(self, basedir, name, molecule, job_id, gaussian_args):
"""Build the GaussianJob class."""
# Populate the class attributes
self._name = name
self._molecule = molecule
self._job_id = job_id
self._basedir = basedir
self.filenames = dict()
self.filenames["input"] = self.name.replace(" ", "_") + ".com"
self.filenames["output"] = self.name.replace(" ", "_") + ".log"
self._gaussian_args = gaussian_args
@property
def path(self):
"""
Computation path, calculated at will as: /basedir/my_name.00job_id/
"""
path = os.path.join(
self.basedir, self.name.replace(" ", "_") + "." + str(self.job_id).zfill(8)
)
return path
@property
def molecule(self):
"""Molecule specification (coords, natoms, etc)"""
return self._molecule
@molecule.setter
def molecule(self, value):
self._molecule = value
@property
def name(self):
"""Job Name"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def job_id(self):
"""Job id"""
return self._job_id
@job_id.setter
def job_id(self, value):
self._job_id = value
@property
def basedir(self):
"""Directory of Gaussian job"""
return self._basedir
@basedir.setter
def basedir(self, value):
self._basedir = value
@property
def header(self):
"""Computation header"""
return self.build_header()
@property
def footer(self):
"""Computation footer"""
return self.build_footer()
@property
def gaussian_args(self):
"""
All arguments necessary for the Gaussian computation:
- Functional
- Dispersion or not ?
- Basis set (One for all atoms. Choose wisely !)
"""
return self._gaussian_args
@gaussian_args.setter
def gaussian_args(self, value):
self._gaussian_args = value
def run(self):
"""Start the job."""
# Log computation start
logging.info("Starting Gaussian: %s", str(self.name))
# Get into workdir, start gaussian, then back to basedir
os.chdir(self.path)
os.system("g16 < " + self.filenames["input"] + " > " + self.filenames["output"])
os.chdir(self.basedir)
# Log end of computation
logging.info("Gaussian finished: %s", str(self.name))
return
def extract_natural_charges(self):
"""Extract NBO Charges parsing the output file."""
# Log start
logging.info("Parsing results from computation %s", str(self.job_id))
# Get into working directory
os.chdir(self.path)
# Initialize charges list
charges = []
with open(self.filenames["output"], mode="r") as out_file:
line = "Foobar line"
while line:
line = out_file.readline()
if "Summary of Natural Population Analysis:" in line:
logging.debug("ID %s: Found NPA table.", str(self.job_id))
# We have the table we want for the charges
# Read five lines to remove the header:
# Summary of Natural Population Analysis:
#
# Natural Population
# Natural ---------------------------------------------
# Atom No Charge Core Valence Rydberg Total
# ----------------------------------------------------------------
for _ in range(0, 5):
out_file.readline()
# Then we read the actual table:
for _ in range(0, self.molecule.natoms):
# Each line follow the header with the form:
# C 1 0.92349 1.99948 3.03282 0.04422 5.07651
line = out_file.readline()
line = line.split()
charges.append(line[2])
logging.debug(
"ID %s: Charges = %s",
str(self.job_id),
" ".join([str(i) for i in charges]),
)
# We have reached the end of the table, we can break the while loop
break
# End of if 'Summary of Natural Population Analysis:'
# Get back to the base directory
os.chdir(self.basedir)
return charges
def get_coordinates(self):
"""Extract coordinates from output file."""
# Log start
logging.info("Extracting coordinates for job %s", str(self.job_id))
# Get into working directory
os.chdir(self.path)
# Parse file with cclib
data = ccread(self.filenames["output"], loglevel=logging.WARNING)
# Return the first coordinates, since it is a single point
return data.atomcoords[0]
def setup_computation(self):
"""
Set computation up before running it.
Create working directory, write input file
"""
# Create working directory
os.makedirs(self.path, mode=0o777, exist_ok=True)
logging.info("Created directory %s", self.path)
# Go into working directory
os.chdir(self.path)
# Write input file
with open(self.filenames["input"], mode="w") as input_file:
input_file.write("\n".join(self.build_input_script()))
logging.debug("Wrote file %s", self.filenames["input"])
# Get back to base directory
os.chdir(self.basedir)
def get_energies(self):
"""
Retrieve HF energies plus thermochemical corrections
:return:
"""
# Log start
logging.info("Extracting energies from %s", self.name)
# Get into working directory
os.chdir(self.path)
# Parse file with cclib
data = ccread(self.filenames["output"], loglevel=logging.WARNING)
# Return the parsed energies as a dictionary
energies = dict.fromkeys(["scfenergy", "enthalpy", "freeenergy"])
energies["scfenergy"] = data.scfenergies[-1]
energies["enthalpy"] = data.enthalpy
energies["freeenergy"] = data.freeenergy
return energies
def build_header(self):
"""
Builds the top part used for the Gaussian calculation.
List of strings expected
"""
header = list()
header.append("%NProcShared=1")
# header.append('%Mem=' + args['memory'])
route = "# " + self.gaussian_args["functional"] + " "
if self.gaussian_args["dispersion"] is not None:
route += "EmpiricalDispersion=" + self.gaussian_args["dispersion"] + " "
route += "gen freq"
header.append(route)
header.append("")
# To update probably
header.append(self.name)
header.append("")
# This is a singlet. Careful for other systems!
header.append("0 1")
logging.debug("Header: \n %s", "\n".join(header))
return header
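    # Illustrative header produced by build_header(); the functional and dispersion values
    # below are assumptions, not defaults of this class:
    #   %NProcShared=1
    #   # B3LYP EmpiricalDispersion=GD3 gen freq
    #   <blank line>
    #   <job name>
    #   <blank line>
    #   0 1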
def build_footer(self):
"""
Builds the bottom part used for the Gaussian calculation.
List of strings.
"""
footer = []
# Basis set is the same for all elements. No ECP either.
# Remove duplicates, and convert to element name
periodic_table = PeriodicTable()
elements = [periodic_table.element[el] for el in list(set(self.molecule.elements_list))]
elements = " ".join(elements)
basisset = self.gaussian_args["basisset"]
footer.append(elements + " 0")
footer.append(basisset)
footer.append("****")
footer.append("")
# footer.append("$NBO")
# # NBO_FILES should be updated to something more useful
# footer.append("FILE=NBO_FILES")
# footer.append("PLOT")
# footer.append("$END")
logging.debug("Footer: \n %s", "\n".join(footer))
return footer
def build_input_script(self):
"""Build full input script"""
script = []
# Put header
script.extend(self.header)
# Add geometry + blank line
script.extend(self.molecule.xyz_geometry())
script.append("")
# Add footer
script.extend(self.footer)
# Add two blank lines for the sake of Gaussian's weird behavior
script.append("")
script.append("")
return script
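# For reference, a sketch of the input deck that build_input_script() assembles, assuming
# a hypothetical job named "job_0" with functional B3LYP, dispersion GD3, basis 6-31G(d)
# and a single carbon atom (all of these example values are illustrative, not taken from
# the source):
#
#   %NProcShared=1
#   # B3LYP EmpiricalDispersion=GD3 gen freq
#
#   job_0
#
#   0 1
#   C    0.000000    0.000000    0.000000
#
#   C 0
#   6-31G(d)
#   ****
#
# plus the two extra blank lines appended at the end.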
def cleanup(self):
"""Removing folders and files once everything is run and extracted"""
logging.info("Removing directory: %s", str(self.path))
shutil.rmtree(self.path)
return
| 32.290429
| 96
| 0.554783
|
1c8cc8d777e6b8f4987676a9eea7431921cccc64
| 2,148
|
py
|
Python
|
KAMC 2021.1.0.3-^.py
|
ArdaKC/KAMC
|
5a0d5fc5fda0ef2de3488e5a76d25043c7998c1c
|
[
"MIT"
] | 6
|
2021-06-04T05:27:08.000Z
|
2021-08-23T09:20:16.000Z
|
KAMC 2021.1.0.3-^.py
|
ArdaKC/KAMC
|
5a0d5fc5fda0ef2de3488e5a76d25043c7998c1c
|
[
"MIT"
] | null | null | null |
KAMC 2021.1.0.3-^.py
|
ArdaKC/KAMC
|
5a0d5fc5fda0ef2de3488e5a76d25043c7998c1c
|
[
"MIT"
] | null | null | null |
from io import StringIO
import os
import os.path
print("KCS Auto MacChanger 2021.1.0.3-^")
print("Gerekli paketleri kurdukdan sonra macı değiştirmek için 1 yazın. : Sadece Mac Değiştirmek İçin 2 Yazın : Metin belgesine ağ aygıtını kaydetmek için 3 : Kayıt Edilmiş Ağ Cihazını Kullanmak için 4 : Hakkında Kısmına Bakmak İçin 5 Yazın.")
secenekler = input("")
if secenekler == "1":
os.system("apt update")
os.system("apt install net-tools")
os.system("apt install macchanger")
os.system("ifconfig")
print("Ağcihazi komutu şuna benzer wlp3s0 wlp2s0 veya wlan0 ağ aygıtı kısmına benzer kodu girin.")
agcihazi = input("Ağ aygiti girin:")
os.system("sudo ifconfig "+ agcihazi + " down && sudo service NetworkManager stop && sudo macchanger -r "+ agcihazi+" && sudo ifconfig "+ agcihazi +" up && sudo service NetworkManager start")
print("İşlem tamamlandi!")
if secenekler == "2":
os.system("ifconfig")
print("Ağcihazi komutu şuna benzer wlp3s0 wlp2s0 veya wlan0 ağ aygıtı kısmına benzer kodu girin.")
agcihazi = input("Ağ aygiti girin:")
os.system("sudo ifconfig "+ agcihazi + " down && sudo service NetworkManager stop && sudo macchanger -r "+ agcihazi+" && sudo ifconfig "+ agcihazi +" up && sudo service NetworkManager start")
print("İşlem tamamlandi!")
os.system("exit")
pass
if secenekler == "3":
os.system("ifconfig")
print("Kayıt edilecek ağ aygıtını girin.")
print("Ağcihazi komutu şuna benzer wlp3s0 wlp2s0 veya wlan0 ağ aygıtı kısmına benzer kodu girin.")
agcihazi = input("Ağ aygiti girin:")
with open("agcihazi.txt", "w") as f:
agcihazi2 = agcihazi
f.write(agcihazi2)
print("agcihazi.txt kayıt edildi.")
os.system("exit")
if secenekler == "4":
with open("agcihazi.txt","r") as f:
icerik = f.read()
agcihazi = icerik
os.system("sudo ifconfig "+ agcihazi + " down && sudo service NetworkManager stop && sudo macchanger -r "+ agcihazi+" && sudo ifconfig "+ agcihazi +" up && sudo service NetworkManager start")
print("İşlem tamamlandi!")
os.system("exit")
pass
if secenekler == "5":
print("Yapımcı : Yeni Yapımcı : Arda KC : Eski Yapımcı Furkan Karasu.")
| 39.777778
| 243
| 0.702048
|
8a0a883617409b051074e5c530b894b9d9038cfa
| 4,463
|
py
|
Python
|
purity_fb/purity_fb_1dot3/models/admin_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 5
|
2017-09-08T20:47:22.000Z
|
2021-06-29T02:11:05.000Z
|
purity_fb/purity_fb_1dot3/models/admin_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 16
|
2017-11-27T20:57:48.000Z
|
2021-11-23T18:46:43.000Z
|
purity_fb/purity_fb_1dot3/models/admin_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 22
|
2017-10-13T15:33:05.000Z
|
2021-11-08T19:56:21.000Z
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AdminResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[Admin]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None): # noqa: E501
"""AdminResponse - a model defined in Swagger""" # noqa: E501
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""Gets the pagination_info of this AdminResponse. # noqa: E501
pagination information, only available in GET requests # noqa: E501
:return: The pagination_info of this AdminResponse. # noqa: E501
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""Sets the pagination_info of this AdminResponse.
pagination information, only available in GET requests # noqa: E501
:param pagination_info: The pagination_info of this AdminResponse. # noqa: E501
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""Gets the items of this AdminResponse. # noqa: E501
a list of administrative account objects # noqa: E501
:return: The items of this AdminResponse. # noqa: E501
:rtype: list[Admin]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this AdminResponse.
a list of administrative account objects # noqa: E501
:param items: The items of this AdminResponse. # noqa: E501
:type: list[Admin]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
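# A minimal usage sketch (values are illustrative; items would normally hold Admin objects):
#
#     resp = AdminResponse(items=[])
#     resp.to_dict()   # -> {'pagination_info': None, 'items': []}
#     print(resp)      # __repr__ delegates to to_str(), which pretty-prints the dict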
| 29.556291
| 204
| 0.594219
|
d9411baf0e9c69075296b7f145a16c985fea89bd
| 4,221
|
py
|
Python
|
editor/editor.py
|
kennnyshiwa/Toxic-Cogs
|
0d547fde220b7e2dc69e4a77403e187f26725e4b
|
[
"MIT"
] | null | null | null |
editor/editor.py
|
kennnyshiwa/Toxic-Cogs
|
0d547fde220b7e2dc69e4a77403e187f26725e4b
|
[
"MIT"
] | null | null | null |
editor/editor.py
|
kennnyshiwa/Toxic-Cogs
|
0d547fde220b7e2dc69e4a77403e187f26725e4b
|
[
"MIT"
] | null | null | null |
from redbot.core import commands, checks
from typing import Union
import discord
class Editor(commands.Cog):
"""Allows for Administrators to edit a bot's messages by providing the new content or by copying another message"""
def __init__(self, bot):
self.bot = bot
@commands.command()
@checks.admin()
async def editmessage(
self, ctx, ecid: int, editid: int, ccid: int, *, content: Union[int, str]
):
"""Edits a message with the content of another message or the specified content.
Arguments:
- ecid: The ID of the channel of the message you are editing (Required)
- editid: The ID of the message you are editing (Required)
- ccid: The ID of the channel of the message you are copying from. If you are giving the raw content yourself, pass 0 as the channel ID. (Required)
- content: The ID of the message that contains the contents of what you want the other message to become, or the new content of the message. (Required, integer (for message id) or text (for new content))
Examples:
`[p]editmessage <edit_channel_id> <edit_message_id> <copy_channel_id> <copy_message_id>`
`[p]editmessage <edit_channel_id> <edit_message_id> 0 New content here`
Real Examples:
`[p]editmessage 133251234164375552 578969593708806144 133251234164375552 578968157520134161`
`[p]editmessage 133251234164375552 578969593708806144 0 ah bruh`
"""
if isinstance(content, int) and ccid == 0:
return await ctx.send(
"You provided an ID of a message to copy from, but didn't provide a channel ID to get the message from."
)
# Make sure channels and IDs are all good
editchannel = self.bot.get_channel(ecid)
if not editchannel or not type(editchannel) == discord.TextChannel:
return await ctx.send("Invalid channel for the message you are editing.")
if not editchannel.permissions_for(ctx.author).manage_messages and not (
await self.bot.is_owner(ctx.author)
):
return await ctx.send("You do not have permission to edit messages in that channel.")
try:
editmessage = await editchannel.fetch_message(editid)
except discord.NotFound:
return await ctx.send(
"Invalid editing message ID, or you passed the wrong channel ID for the message."
)
except discord.Forbidden:
return await ctx.send(
"I'm not allowed to view the channel which contains the message I am editing."
)
if ccid != 0 and type(content) == int:
copychannel = self.bot.get_channel(ccid)
if not copychannel or not type(editchannel) == discord.TextChannel:
return await ctx.send("Invalid ID for channel of the message to copy from.")
try:
copymessage = await copychannel.fetch_message(content)
except discord.NotFound:
return await ctx.send(
"Invalid copying message ID, or you passed the wrong channel ID for the message."
)
except discord.Forbidden:
return await ctx.send(
"I'm not allowed to view the channel of the message from which I am copying."
)
# All checks passed
content = copymessage.content
try:
embed = copymessage.embeds[0]
except IndexError:
embed = None
try:
await editmessage.edit(content=content, embed=embed)
except discord.errors.Forbidden:
return await ctx.send("I can only edit my own messages.")
await ctx.send(f"Message successfully edited. Jump URL: {editmessage.jump_url}")
else:
try:
await editmessage.edit(content=content, embed=None)
await ctx.send(f"Message successfully edited. Jump URL: {editmessage.jump_url}")
except discord.errors.Forbidden:
await ctx.send("I can only edit my own messages.")
| 46.384615
| 215
| 0.619995
|
fe52ead09047d942e3b32d89f52e11966e254a02
| 21,395
|
py
|
Python
|
msticpy/sectools/iocextract.py
|
Dqirvin/msticpy
|
9c9f54b755bf4f2b917f30b41ab2336caf102f3e
|
[
"MIT"
] | null | null | null |
msticpy/sectools/iocextract.py
|
Dqirvin/msticpy
|
9c9f54b755bf4f2b917f30b41ab2336caf102f3e
|
[
"MIT"
] | null | null | null |
msticpy/sectools/iocextract.py
|
Dqirvin/msticpy
|
9c9f54b755bf4f2b917f30b41ab2336caf102f3e
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Module for IoCExtract class.
Uses a set of builtin regular expressions to look for Indicator of
Compromise (IoC) patterns. Input can be a single string or a pandas
dataframe with one or more columns specified as input.
The following types are built-in:
- IPv4 and IPv6
- URL
- DNS domain
- Hashes (MD5, SHA1, SHA256)
- Windows file paths
- Linux file paths (this is kind of noisy because a legal linux file
path can have almost any character).
You can modify or add to the regular expressions used at runtime.
"""
import re
from collections import defaultdict, namedtuple
from enum import Enum
from typing import Any, Dict, List, Set, Tuple, Union
from urllib.parse import unquote
import pandas as pd
from .._version import VERSION
from ..nbtools.utility import export
from .domain_utils import DomainValidator
__version__ = VERSION
__author__ = "Ian Hellen"
def _compile_regex(regex):
return re.compile(regex, re.I | re.X | re.M)
IoCPattern = namedtuple("IoCPattern", ["ioc_type", "comp_regex", "priority", "group"])
@export
class IoCType(Enum):
"""Enumeration of IoC Types."""
unknown = "unknown"
ipv4 = "ipv4"
ipv6 = "ipv6"
dns = "dns"
url = "url"
md5_hash = "md5_hash"
sha1_hash = "sha1_hash"
sha256_hash = "sha256_hash"
file_hash = "file_hash"
email = "email"
windows_path = "windows_path"
linux_path = "linux_path"
hostname = "hostname"
@classmethod
def parse(cls, value: str) -> "IoCType":
"""
Return parsed IoCType of string.
Parameters
----------
value : str
Enumeration name
Returns
-------
IoCType
IoCType matching name or unknown if no match
"""
try:
ioc_type = IoCType(value.lower())
except ValueError:
ioc_type = IoCType.unknown
return ioc_type
@export
class IoCExtract:
"""
IoC Extractor - looks for common IoC patterns in input strings.
The extract() method takes either a string or a pandas DataFrame
as input. When using the string option as an input extract will
return a dictionary of results. When using a DataFrame the results
will be returned as a new DataFrame with the following columns:
IoCType: the mnemonic used to distinguish different IoC Types
Observable: the actual value of the observable
SourceIndex: the index of the row in the input DataFrame from
which the source for the IoC observable was extracted.
The class has a number of built-in IoC regex definitions.
These can be retrieved using the ioc_types attribute.
Addition IoC definitions can be added using the add_ioc_type
method.
Note: due to some ambiguity in the regular expression patterns
for different types, an observable may be returned assigned to
multiple observable types. E.g. 192.168.0.1 is also a legal file
name in both Linux and Windows. Linux file names have a particularly
large scope in terms of legal characters so it will be quite common
to see other IoC observables (or parts of them) returned as a
possible linux path.
"""
IPV4_REGEX = r"(?P<ipaddress>(?:[0-9]{1,3}\.){3}[0-9]{1,3})"
IPV6_REGEX = r"(?<![:.\w])(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}(?![:.\w])"
DNS_REGEX = r"((?=[a-z0-9-]{1,63}\.)[a-z0-9]+(-[a-z0-9]+)*\.){1,126}[a-z]{2,63}"
# dns_regex =
# '\\b((?=[a-z0-9-]{1,63}\\.)[a-z0-9]+(-[a-z0-9]+)*\\.){2,}[a-z]{2,63}\\b'
URL_REGEX = r"""
(?P<protocol>(https?|ftp|telnet|ldap|file)://)
(?P<userinfo>([a-z0-9-._~!$&\'()*+,;=:]|%[0-9A-F]{2})*@)?
(?P<host>([a-z0-9-._~!$&\'()*+,;=]|%[0-9A-F]{2})*)
(:(?P<port>\d*))?
(/(?P<path>([^?\#"<> ]|%[0-9A-F]{2})*/?))?
(\?(?P<query>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?
(\#(?P<fragment>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?"""
WINPATH_REGEX = r"""
(?P<root>[a-z]:|\\\\[a-z0-9_.$-]+||[.]+)
(?P<folder>\\(?:[^\/:*?"\'<>|\r\n]+\\)*)
(?P<file>[^\\/*?""<>|\r\n ]+)"""
# Linux simplified - this ignores some legal linux paths avoid matching too much
# This also matches URLs but these should be thrown out by priority
# weighting since URL has a higher priority
LXPATH_REGEX = r"""(?P<root>/+||[.]+)
(?P<folder>/(?:[^\\/:*?<>|\r\n]+/)*)
(?P<file>[^/\0<>|\r\n ]+)"""
MD5_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{32})(?:$|[^A-Fa-f0-9])"
SHA1_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{40})(?:$|[^A-Fa-f0-9])"
SHA256_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{64})(?:$|[^A-Fa-f0-9])"
_content_regex: Dict[str, IoCPattern] = {}
def __init__(self):
"""Intialize new instance of IoCExtract."""
# IP Addresses
self.add_ioc_type(IoCType.ipv4.name, self.IPV4_REGEX, 0, "ipaddress")
self.add_ioc_type(IoCType.ipv6.name, self.IPV6_REGEX, 0)
# Dns Domains
# This also matches IP addresses but IPs have higher
# priority both matching on the same substring will defer
# to the IP regex
self.add_ioc_type(IoCType.dns.name, self.DNS_REGEX, 1)
# Http requests
self.add_ioc_type(IoCType.url.name, self.URL_REGEX, 0)
# File paths
# Windows
self.add_ioc_type(IoCType.windows_path.name, self.WINPATH_REGEX, 2)
self.add_ioc_type(IoCType.linux_path.name, self.LXPATH_REGEX, 2)
# MD5, SHA1, SHA256 hashes
self.add_ioc_type(IoCType.md5_hash.name, self.MD5_REGEX, 1, "hash")
self.add_ioc_type(IoCType.sha1_hash.name, self.SHA1_REGEX, 1, "hash")
self.add_ioc_type(IoCType.sha256_hash.name, self.SHA256_REGEX, 1, "hash")
self.dom_val = DomainValidator()
# Public members
def add_ioc_type(
self, ioc_type: str, ioc_regex: str, priority: int = 0, group: str = None
):
"""
Add an IoC type and regular expression to use to the built-in set.
Parameters
----------
ioc_type : str
A unique name for the IoC type
ioc_regex : str
A regular expression used to search for the type
priority : int, optional
Priority of the regex match vs. other ioc_patterns. 0 is
the highest priority (the default is 0).
group : str, optional
The regex group to match (the default is None,
which will match on the whole expression)
Notes
-----
Pattern priorities.
If two IocType patterns match on the same substring, the matched
substring is assigned to the pattern/IocType with the highest
priority. E.g. `foo.bar.com` will match types: `dns`, `windows_path`
and `linux_path` but since `dns` has a higher priority, the expression
is assigned to the `dns` matches.
"""
if ioc_type is None or ioc_type.strip() is None:
raise Exception("No value supplied for ioc_type parameter")
if ioc_regex is None or ioc_regex.strip() is None:
raise Exception("No value supplied for ioc_regex parameter")
self._content_regex[ioc_type] = IoCPattern(
ioc_type=ioc_type,
comp_regex=_compile_regex(regex=ioc_regex),
priority=priority,
group=group,
)
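# A minimal sketch of registering a custom pattern (the CVE regex shown is illustrative,
# not part of the built-in set):
#
#     extractor = IoCExtract()
#     extractor.add_ioc_type("cve", r"CVE-\d{4}-\d{4,}", priority=0)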
@property
def ioc_types(self) -> dict:
"""
Return the current set of IoC types and regular expressions.
Returns
-------
dict
dict of IoC Type names and regular expressions
"""
return self._content_regex
# pylint: disable=too-many-locals
def extract(
self,
src: str = None,
data: pd.DataFrame = None,
columns: List[str] = None,
**kwargs,
) -> Union[Dict[str, Set[str]], pd.DataFrame]:
"""
Extract IoCs from either a string or pandas DataFrame.
Parameters
----------
src : str, optional
source string in which to look for IoC patterns
(the default is None)
data : pd.DataFrame, optional
input DataFrame from which to read source strings
(the default is None)
columns : list, optional
The list of columns to use as source strings,
if the `data` parameter is used. (the default is None)
Other Parameters
----------------
os_family : str, optional
'Linux' or 'Windows' (the default is 'Windows'). This
is used to toggle between Windows or Linux path matching.
ioc_types : list, optional
Restrict matching to just specified types.
(default is all types)
include_paths : bool, optional
Whether to include path matches (which can be noisy)
(the default is false - excludes 'windows_path'
and 'linux_path'). If `ioc_types` is specified
this parameter is ignored.
Returns
-------
Any
dict of found observables (if input is a string) or
DataFrame of observables
Notes
-----
Extract takes either a string or a pandas DataFrame as input.
When using the string option as an input extract will
return a dictionary of results.
When using a DataFrame the results will be returned as a new
DataFrame with the following columns:
- IoCType: the mnemonic used to distinguish different IoC Types
- Observable: the actual value of the observable
- SourceIndex: the index of the row in the input DataFrame from
which the source for the IoC observable was extracted.
IoCType Pattern selection
The default list is: ['ipv4', 'ipv6', 'dns', 'url',
'md5_hash', 'sha1_hash', 'sha256_hash'] plus any
user-defined types.
'windows_path', 'linux_path' are excluded unless `include_paths`
is True or explicitly included in `ioc_types`.
"""
os_family = kwargs.get("os_family", "Windows")
ioc_types = kwargs.get("ioc_types", None)
include_paths = kwargs.get("include_paths", False)
if src and src.strip():
return self._scan_for_iocs(
src=src, os_family=os_family, ioc_types=ioc_types
)
if data is None:
raise Exception("No source data was supplied to extract")
if columns is None:
raise Exception("No values were supplied for the columns parameter")
# Use only requested IoC Type patterns
if ioc_types:
ioc_types_to_use = list(set(ioc_types))
else:
ioc_types_to_use = list(set(self._content_regex.keys()))
if not include_paths:
ioc_types_to_use.remove("windows_path")
ioc_types_to_use.remove("linux_path")
col_set = set(columns)
if not col_set <= set(data.columns):
missing_cols = [elem for elem in col_set if elem not in data.columns]
raise Exception(
"Source column(s) {} not found in supplied DataFrame".format(
", ".join(missing_cols)
)
)
result_columns = ["IoCType", "Observable", "SourceIndex"]
result_rows: List[pd.Series] = []
for idx, datarow in data.iterrows():
result_rows.extend(
self._search_in_row(
datarow, idx, columns, result_columns, os_family, ioc_types_to_use
)
)
result_frame = pd.DataFrame(data=result_rows, columns=result_columns)
return result_frame
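# A minimal usage sketch (the observables in the source string are made-up examples):
#
#     ioc_extractor = IoCExtract()
#     results = ioc_extractor.extract(src="beacon from 10.1.2.3 fetching http://bad.example.com/x")
#     # results is a dict of sets keyed by IoC type; e.g. results["ipv4"] contains "10.1.2.3"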
# pylint: disable=too-many-arguments
def _search_in_row(
self,
datarow: pd.Series,
idx: Any,
columns: List[str],
result_columns: List[str],
os_family: str,
ioc_types_to_use: List[str],
) -> List[pd.Series]:
"""Return results for a single input row."""
result_rows = []
for col in columns:
ioc_results = self._scan_for_iocs(datarow[col], os_family, ioc_types_to_use)
for result_type, result_set in ioc_results.items():
if result_set:
for observable in result_set:
result_row = pd.Series(
data=[result_type, observable, idx], index=result_columns
)
result_rows.append(result_row)
return result_rows
def extract_df(
self, data: pd.DataFrame, columns: List[str], **kwargs
) -> pd.DataFrame:
"""
Extract IoCs from either a pandas DataFrame.
Parameters
----------
data : pd.DataFrame
input DataFrame from which to read source strings
columns : list
The list of columns to use as source strings,
Other Parameters
----------------
os_family : str, optional
'Linux' or 'Windows' (the default is 'Windows'). This
is used to toggle between Windows or Linux path matching.
ioc_types : list, optional
Restrict matching to just specified types.
(default is all types)
include_paths : bool, optional
Whether to include path matches (which can be noisy)
(the default is false - excludes 'windows_path'
and 'linux_path'). If `ioc_types` is specified
this parameter is ignored.
Returns
-------
pd.DataFrame
DataFrame of observables
Notes
-----
Extract takes a pandas DataFrame as input.
The results will be returned as a new
DataFrame with the following columns:
- IoCType: the mnemonic used to distinguish different IoC Types
- Observable: the actual value of the observable
- SourceIndex: the index of the row in the input DataFrame from
which the source for the IoC observable was extracted.
IoCType Pattern selection
The default list is: ['ipv4', 'ipv6', 'dns', 'url',
'md5_hash', 'sha1_hash', 'sha256_hash'] plus any
user-defined types.
'windows_path', 'linux_path' are excluded unless `include_paths`
is True or explicitly included in `ioc_types`.
"""
os_family = kwargs.get("os_family", "Windows")
ioc_types = kwargs.get("ioc_types", None)
include_paths = kwargs.get("include_paths", False)
# Use only requested IoC Type patterns
if ioc_types:
ioc_types_to_use = list(set(ioc_types))
else:
ioc_types_to_use = list(set(self._content_regex.keys()))
if not include_paths:
ioc_types_to_use.remove("windows_path")
ioc_types_to_use.remove("linux_path")
col_set = set(columns)
if not col_set <= set(data.columns):
missing_cols = [elem for elem in col_set if elem not in data.columns]
raise Exception(
"Source column(s) {} not found in supplied DataFrame".format(
", ".join(missing_cols)
)
)
result_columns = ["IoCType", "Observable", "SourceIndex"]
result_rows = []
for idx, datarow in data.iterrows():
result_rows.extend(
self._search_in_row(
datarow, idx, columns, result_columns, os_family, ioc_types_to_use
)
)
result_frame = pd.DataFrame(data=result_rows, columns=result_columns)
return result_frame
def validate(self, input_str: str, ioc_type: str) -> bool:
"""
Check that `input_str` matches the regex for the specified `ioc_type`.
Parameters
----------
input_str : str
the string to test
ioc_type : str
the regex pattern to use
Returns
-------
bool
True if match.
"""
if ioc_type == IoCType.file_hash.name:
val_type = self.file_hash_type(input_str).name
elif ioc_type == IoCType.hostname.name:
val_type = "dns"
else:
val_type = ioc_type
if val_type not in self._content_regex:
raise KeyError(
"Unknown type {}. Valid types are: {}".format(
ioc_type, list(self._content_regex.keys())
)
)
rgx = self._content_regex[val_type]
pattern_match = rgx.comp_regex.fullmatch(input_str)
if val_type == "dns":
return self.dom_val.validate_tld(input_str) and pattern_match
return pattern_match is not None
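# For example (inputs are illustrative): validate("10.1.2.3", "ipv4") is True,
# while validate("not-an-ip", "ipv4") is False.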
@staticmethod
def file_hash_type(file_hash: str) -> IoCType:
"""
Return specific IoCType based on hash length.
Parameters
----------
file_hash : str
File hash string
Returns
-------
IoCType
Specific hash type or unknown.
"""
hashsize_map = {
32: IoCType.md5_hash,
40: IoCType.sha1_hash,
64: IoCType.sha256_hash,
}
hashsize = len(file_hash.strip())
return hashsize_map.get(hashsize, IoCType.unknown)
def get_ioc_type(self, observable: str) -> str:
"""
Return first matching type.
Parameters
----------
observable : str
The IoC Observable to check
Returns
-------
str
The IoC type enumeration (unknown, if no match)
"""
results = self._scan_for_iocs(src=observable, os_family="Windows")
if not results:
results = self._scan_for_iocs(
src=observable, os_family="Linux", ioc_types=[IoCType.linux_path.name]
)
if not results:
return IoCType.unknown.name
# we need to select the type that is an exact match for the whole
# observable string (_scan_for_iocs will return matching substrings)
for ioc_type, match_set in results.items():
if observable in match_set:
return ioc_type
return IoCType.unknown.name
# Private methods
def _scan_for_iocs(
self, src: str, os_family: str, ioc_types: List[str] = None
) -> Dict[str, Set[str]]:
"""Return IoCs found in the string."""
ioc_results: Dict[str, Set] = defaultdict(set)
iocs_found: Dict[str, Tuple[str, int]] = {}
# pylint: disable=too-many-nested-blocks
for (ioc_type, rgx_def) in self._content_regex.items():
if ioc_types and ioc_type not in ioc_types:
continue
if (os_family == "Linux" and rgx_def.ioc_type == "windows_path") or (
os_family == "Windows" and rgx_def.ioc_type == "linux_path"
):
continue
match_pos = 0
for rgx_match in rgx_def.comp_regex.finditer(src, match_pos):
if rgx_match is None:
break
# If the rgx_def names a group to match on, use that
match_str = (
rgx_match.groupdict()[rgx_def.group]
if rgx_def.group
else rgx_match.group()
)
if ioc_type == "dns" and not self.dom_val.validate_tld(match_str):
continue
self._add_highest_pri_match(iocs_found, match_str, rgx_def)
if ioc_type == "url":
self._check_decode_url(match_str, rgx_def, match_pos, iocs_found)
match_pos = rgx_match.end()
for ioc, ioc_result in iocs_found.items():
ioc_results[ioc_result[0]].add(ioc)
return ioc_results
def _check_decode_url(self, match_str, rgx_def, match_pos, iocs_found):
"""Get any other IoCs from decoded URL."""
decoded_url = unquote(match_str)
for url_match in rgx_def.comp_regex.finditer(decoded_url, match_pos):
if url_match is not None:
self._add_highest_pri_match(iocs_found, url_match.group(), rgx_def)
self._add_highest_pri_match(
iocs_found,
url_match.groupdict()["host"],
self._content_regex["dns"],
)
@staticmethod
def _add_highest_pri_match(
iocs_found: dict, current_match: str, current_def: IoCPattern
):
# if we already found a match for this item and the previous
# ioc type is more specific then don't add this to the results
if (
current_match in iocs_found
and current_def.priority >= iocs_found[current_match][1]
):
return
iocs_found[current_match] = (current_def.ioc_type, current_def.priority)
| 35.363636
| 88
| 0.5785
|
ba240e9888114e1b9434ec6cb550695e335f096c
| 1,523
|
py
|
Python
|
mmfashion/core/evaluation/attr_predict_demo.py
|
aleksandrkrivolap/mmfashion
|
ca7f045d02db47b3d77fe15657fa0fddcadcb4ca
|
[
"Apache-2.0"
] | 1
|
2019-12-12T11:15:02.000Z
|
2019-12-12T11:15:02.000Z
|
mmfashion/core/evaluation/attr_predict_demo.py
|
aleksandrkrivolap/mmfashion
|
ca7f045d02db47b3d77fe15657fa0fddcadcb4ca
|
[
"Apache-2.0"
] | null | null | null |
mmfashion/core/evaluation/attr_predict_demo.py
|
aleksandrkrivolap/mmfashion
|
ca7f045d02db47b3d77fe15657fa0fddcadcb4ca
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import operator
from scipy.spatial.distance import cdist
import torch
class AttrPredictor(object):
def __init__(self, cfg, tops_type=[3, 5, 10]):
""" create the empty array to count
true positive(tp), true negative(tn), false positive(fp) and false negative(fn);
Args:
class_num : number of classes in the dataset
tops_type : default calculate top3, top5 and top10
"""
attr_cloth_file = open(cfg.attr_cloth_file).readlines()
self.attr_idx2name = {}
for i, line in enumerate(attr_cloth_file[2:]):
self.attr_idx2name[i] = line.strip('\n').split()[0]
def print_attr_name(self, pred_idx):
for idx in pred_idx:
print(self.attr_idx2name[idx])
def show_prediction(self, pred):
if isinstance(pred, torch.Tensor):
data = pred.data.cpu().numpy()
elif isinstance(pred, np.ndarray):
data = pred
else:
raise TypeError('type {} cannot be calculated.'.format(type(pred)))
for i in range(data.shape[0]):  # iterate over the numpy copy so ndarray inputs also work
indexes = np.argsort(data[i])[::-1]
idx3, idx5, idx10 = indexes[:3], indexes[:5], indexes[:10]
print('[ Top3 Prediction ]')
self.print_attr_name(idx3)
print('[ Top5 Prediction ]')
self.print_attr_name(idx5)
print('[ Top10 Prediction ]')
self.print_attr_name(idx10)
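# A minimal usage sketch (cfg and the prediction tensor are hypothetical stand-ins):
#
#     predictor = AttrPredictor(cfg)           # cfg.attr_cloth_file points at the attribute list file
#     predictor.show_prediction(attr_prob)     # attr_prob: tensor of shape (batch, num_attributes)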
| 30.46
| 88
| 0.571898
|
088ec8e9ec20e83609bf4c99e1d770702ee32cd3
| 1,211
|
py
|
Python
|
aiub/urls.py
|
shakil-muntasir/aiub
|
23a0796059ec5192ee14ca48b979dad23e9d9cf4
|
[
"MIT"
] | 1
|
2021-09-14T14:11:45.000Z
|
2021-09-14T14:11:45.000Z
|
aiub/urls.py
|
shakil-muntasir/aiub
|
23a0796059ec5192ee14ca48b979dad23e9d9cf4
|
[
"MIT"
] | null | null | null |
aiub/urls.py
|
shakil-muntasir/aiub
|
23a0796059ec5192ee14ca48b979dad23e9d9cf4
|
[
"MIT"
] | null | null | null |
"""aiub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from django.urls import include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('homepage.urls')),
]
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 27.522727
| 77
| 0.723369
|
5cb2bc045544e0d7ef61fb25db8dfb8af3abe669
| 597
|
py
|
Python
|
labs/lab-09/checkpoint5.py
|
niclee500/oss-repo-template
|
19a6cf14253355c540ecf2a7d82486c9cf524709
|
[
"MIT"
] | null | null | null |
labs/lab-09/checkpoint5.py
|
niclee500/oss-repo-template
|
19a6cf14253355c540ecf2a7d82486c9cf524709
|
[
"MIT"
] | null | null | null |
labs/lab-09/checkpoint5.py
|
niclee500/oss-repo-template
|
19a6cf14253355c540ecf2a7d82486c9cf524709
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
client = MongoClient()
db = client.mongo_db_lab
collection = db.definitions
from bson.objectid import ObjectId
import random
import datetime
def random_word_requester():
def_list = []
for definition in collection.find():
def_list.append(definition)
random_def = random.choice(def_list)
collection.update_one({"_id": random_def["_id"]}, {"$push": {"dates": str(datetime.datetime.isoformat(datetime.datetime.utcnow()))}})
# find_one returns the updated document itself rather than a Cursor object
return collection.find_one({"_id": random_def["_id"]})
if __name__ == '__main__':
print(random_word_requester())
| 28.428571
| 137
| 0.726968
|
563ea44203fcf8aa7eb19ab79fce8c271669942d
| 8,373
|
py
|
Python
|
tests/test_template.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
tests/test_template.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
tests/test_template.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from squadron import template
from squadron.template import FileConfig, get_config, apply_config
from squadron.exthandlers.extutils import get_filename
from squadron.log import log, setup_log
from tempfile import mkdtemp
from shutil import rmtree
import pytest
from helper import are_dir_trees_equal, get_test_path
import os
import stat
setup_log('DEBUG', console=True)
test_path = os.path.join(get_test_path(), 'template_tests')
def test_template_basic(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path, 'test1'))
test.render(dirname, {'name':'user', 'variable': 'test3'}, {})
assert are_dir_trees_equal(dirname, os.path.join(test_path, 'test1result'))
def test_template_chown_problem(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path, 'test1-notpermitted'))
with pytest.raises(OSError) as ex:
test.render(dirname, {'name':'user'}, {}, False)
test.render(dirname, {'name':'user'}, {}, True)
assert are_dir_trees_equal(dirname, os.path.join(test_path, 'test1result'))
def test_template_with_config(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test2'))
result = test.render(dirname, {'name':'user'}, {})
assert are_dir_trees_equal(dirname, os.path.join(test_path,'test2result'))
assert len(result) == 3
assert result['test2/'] == True
assert result['test3/'] == False
assert result['test3/atomic/'] == True
st_file = os.stat(os.path.join(dirname, 'file'))
assert stat.S_IMODE(st_file.st_mode) == 0642
st_test3 = os.stat(os.path.join(dirname, 'test3'))
assert stat.S_IMODE(st_test3.st_mode) == 0775
st_test3_file = os.stat(os.path.join(dirname, 'test3', 'hello.txt'))
assert stat.S_IMODE(st_test3_file.st_mode) != 0775 # not recursive
def test_template_with_config_dir_error(tmpdir):
dirname = str(tmpdir)
with pytest.raises(ValueError) as ex:
test = template.DirectoryRender(os.path.join(test_path,'config-dir-error'))
result = test.render(dirname, {'name':'user'}, {})
assert ex is not None
def test_extensions():
assert template.get_sq_ext('filename.txt') == ''
assert template.get_sq_ext('filename.txt~gz') == 'gz'
assert template.get_sq_ext('filename~tar') == 'tar'
assert template.get_sq_ext('filename~~~tar.gz') == 'tar.gz'
assert template.get_sq_ext('filename~tar.bz2') == 'tar.bz2'
assert template.get_sq_ext('filename') == ''
assert get_filename('filename.txt') == 'filename.txt'
assert get_filename('filename.txt~gz') == 'filename.txt'
assert get_filename('filename~tar') == 'filename'
assert get_filename('filename~~~tar.gz') == 'filename'
assert get_filename('filename~tar.bz2') == 'filename'
assert get_filename('filename') == 'filename'
assert template.get_file_ext('filename.txt') == 'txt'
assert template.get_file_ext('filename.txt.gz') == 'gz'
assert template.get_file_ext('filename.tar') == 'tar'
assert template.get_file_ext('filename.tar.gz') == 'tar.gz'
assert template.get_file_ext('filename.tar.bz2') == 'tar.bz2'
assert template.get_file_ext('filename') == ''
with pytest.raises(ValueError) as ex:
get_filename('~tpl')
assert ex is not None
def test_autotest(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test-autotest'))
test.render(dirname, {'name':'user'}, {})
assert are_dir_trees_equal(dirname, os.path.join(test_path,'test-autotest-result'))
def test_autotest_fail(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test-autotest2'))
with pytest.raises(ValueError) as ex:
test.render(dirname, {'name':'user'}, {})
assert ex is not None
def test_parse_config(tmpdir):
conf_file = os.path.join(str(tmpdir), 'config.sq')
with open(conf_file, 'w') as wfile:
print('# this is a comment', file=wfile)
print('conf.d/ atomic:@atomic', file=wfile)
print('', file=wfile)
print('httpd.conf user:sean group:@{group.name} mode:0644', file=wfile)
cfg = {'atomic':True, 'group': {'name': 'dudes'}}
render = template.DirectoryRender(test_path)
result = render.parse_config_sq(conf_file, cfg)
assert len(result) == 2
assert result[0].filepath == 'conf.d/'
assert result[0].atomic == True
assert result[0].user == None
assert result[0].group == None
assert result[0].mode == None
assert result[1].filepath == 'httpd.conf'
assert result[1].atomic == False
assert result[1].user == 'sean'
assert result[1].group == 'dudes'
assert result[1].mode == '0644'
def test_parse_config_error(tmpdir):
conf_file = os.path.join(str(tmpdir), 'config.sq')
with open(conf_file, 'w') as wfile:
print('conf.d', file=wfile)
render = template.DirectoryRender(test_path)
with pytest.raises(ValueError) as ex:
render.parse_config_sq(conf_file, {})
assert ex is not None
conf_file = os.path.join(str(tmpdir), 'config2.sq')
with open(conf_file, 'w') as wfile:
print('conf.d/ atomic:true mdoe:0000', file=wfile)
with pytest.raises(ValueError) as ex:
render.parse_config_sq(conf_file, {})
assert ex is not None
conf_file = os.path.join(str(tmpdir), 'config3.sq')
with open(conf_file, 'w') as wfile:
print('conf.d/', file=wfile)
with pytest.raises(ValueError) as ex:
render.parse_config_sq(conf_file, {})
assert ex is not None
def test_get_config():
config = {'conf.d/' : FileConfig('conf.d/', True, None, None, 0755),
'conf.d/config' : FileConfig('conf.d/config', False, 'user', 'group', None)}
already_configured = set()
assert get_config('conf.d/','conf.d/', config, already_configured) == [config['conf.d/']]
assert 'conf.d/' in already_configured
assert get_config('conf.d/','conf.d/', config, already_configured) == []
assert 'conf.d/' in already_configured
assert get_config('conf.d/config', 'conf.d/config', config, set()) == [config['conf.d/'], config['conf.d/config']]
assert get_config('conf.d/non-existant-file', 'conf.d/non-existant-file', config, set()) == [config['conf.d/']]
assert get_config('non-exist', 'non-exist', config, set()) == []
def test_apply_config(tmpdir):
tmpdir = str(tmpdir)
filepath = os.path.join(tmpdir, 'test.txt')
with open(filepath, 'w') as cfile:
cfile.write('test')
apply_config(tmpdir, [FileConfig(filepath, False, None, None, '0777')], False)
st = os.stat(filepath)
assert stat.S_IMODE(st.st_mode) == 0777
filepath = os.path.join(tmpdir, 'test2.txt')
with open(filepath, 'w') as cfile:
cfile.write('test2')
apply_config(tmpdir, [FileConfig(filepath, False, None, None, '0777')], True)
st = os.stat(filepath)
# dry run doesn't affect mode
assert stat.S_IMODE(st.st_mode) == 0777
def test_git_repo(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test-git'))
test.render(dirname, {}, {})
assert are_dir_trees_equal(dirname, os.path.join(test_path,'test-git-result'))
def test_git_repo_chmod(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test-git-chmod'))
test.render(dirname, {}, {})
result_dir = os.path.join(test_path,'test-git-result')
assert are_dir_trees_equal(dirname, result_dir)
st = os.stat(os.path.join(dirname, 'test', 'install'))
# Chose some weird mode that wouldn't normally be set
assert stat.S_IMODE(st.st_mode) == 0604
def test_ext_created_dir(tmpdir):
dirname = str(tmpdir)
test = template.DirectoryRender(os.path.join(test_path,'test-ext-created-dir'))
atomic = test.render(dirname, {}, {})
assert 'testdir/' in atomic
assert atomic['testdir/'] == True
assert 'dir1/testdir/' in atomic
assert atomic['dir1/testdir/'] == True
result = os.path.join(dirname, 'testdir')
assert os.path.isdir(result) == True
assert len(os.listdir(result)) == 0 # Should be empty
result = os.path.join(dirname, 'dir1', 'testdir')
assert os.path.isdir(result) == True
assert len(os.listdir(result)) == 0 # Should be empty
| 36.246753
| 118
| 0.676221
|
35a9dcc4cd39d772880a2604e8415ce8ce0515f5
| 4,436
|
py
|
Python
|
egs/multi_en/s5/local/swbd_format_acronyms_dict.py
|
vingo555/kaldi
|
63774a54255ee102c5442ab8c1fc1a3bea4e83dc
|
[
"Apache-2.0"
] | 14
|
2019-02-12T02:59:02.000Z
|
2020-09-10T12:19:04.000Z
|
egs/multi_en/s5/local/swbd_format_acronyms_dict.py
|
Acidburn0zzz/kaldi
|
134cf7ee5c9604a431080db14cf6dc19f6524047
|
[
"Apache-2.0"
] | null | null | null |
egs/multi_en/s5/local/swbd_format_acronyms_dict.py
|
Acidburn0zzz/kaldi
|
134cf7ee5c9604a431080db14cf6dc19f6524047
|
[
"Apache-2.0"
] | 4
|
2019-01-05T12:40:11.000Z
|
2021-02-05T22:07:38.000Z
|
#!/usr/bin/env python
# Copyright 2015 Minhua Wu
# Apache 2.0
###########################################################################################
# This script was copied from egs/fisher_swbd/s5/local/format_acronyms_dict.py
# The source commit was e69198c3dc5633f98eb88e1cdf20b2521a598f21
# No changes were made
###########################################################################################
# convert acronyms in swbd dict to fisher convention
# IBM to i._b._m.
# BBC to b._b._c.
# BBCs to b._b._c.s
# BBC's to b._b._c.'s
import argparse,re
__author__ = 'Minhua Wu'
parser = argparse.ArgumentParser(description='format acronyms to a._b._c.')
parser.add_argument('-i','--input', help='Input lexicon',required=True)
parser.add_argument('-o1','--output1',help='Output acronym lexicon(mapped)', required=True)
parser.add_argument('-o2','--output2',help='Output acronym lexicon(original)', required=True)
parser.add_argument('-L','--Letter', help='Input single letter pronunciation',required=True)
parser.add_argument('-M','--Map', help='Output acronyms mapping',required=True)
args = parser.parse_args()
fin_lex = open(args.input,"r")
fin_Letter = open(args.Letter, "r")
fout_lex = open(args.output1, "w")
fout_lex_ori = open(args.output2, "w")
fout_map = open(args.Map, "w")
# Initialise single letter dictionary
dict_letter = {}
for single_letter_lex in fin_Letter:
items = single_letter_lex.split()
dict_letter[items[0]] = single_letter_lex[len(items[0])+1:].strip()
fin_Letter.close()
#print dict_letter
for lex in fin_lex:
items = lex.split()
word = items[0]
lexicon = lex[len(items[0])+1:].strip()
# find acronyms from words with only letters and '
pre_match = re.match(r'^[A-Za-z]+$|^[A-Za-z]+\'s$|^[A-Za-z]+s$',word)
if pre_match:
# find if a word in the form of xxx's is an acronym
if word[-2:] == '\'s' and (lexicon[-1] == 's' or lexicon[-1] == 'z'):
actual_word = word[:-2]
actual_lexicon = lexicon[:-2]
acronym_lexicon = ""
for l in actual_word:
acronym_lexicon = acronym_lexicon + dict_letter[l.upper()] + " "
if acronym_lexicon.strip() == actual_lexicon:
acronym_mapped = ""
for l in actual_word[:-1]:
acronym_mapped = acronym_mapped + l.lower() + '._'
acronym_mapped = acronym_mapped + actual_word[-1].lower() + '.\'s'
fout_map.write(word + '\t' + acronym_mapped + '\n')
fout_lex.write(acronym_mapped + ' ' + lexicon + '\n')
fout_lex_ori.write(word + ' ' + lexicon + '\n')
else:
continue
# find if a word in the form of xxxs is an acronym
elif word[-1] == 's' and (lexicon[-1] == 's' or lexicon[-1] == 'z'):
actual_word = word[:-1]
actual_lexicon = lexicon[:-2]
acronym_lexicon = ""
for l in actual_word:
acronym_lexicon = acronym_lexicon + dict_letter[l.upper()] + " "
if acronym_lexicon.strip() == actual_lexicon:
acronym_mapped = ""
for l in actual_word[:-1]:
acronym_mapped = acronym_mapped + l.lower() + '._'
acronym_mapped = acronym_mapped + actual_word[-1].lower() + '.s'
fout_map.write(word + '\t' + acronym_mapped + '\n')
fout_lex.write(acronym_mapped + ' ' + lexicon + '\n')
fout_lex_ori.write(word + ' ' + lexicon + '\n')
else:
continue
# find if a word in the form of xxx (not ending with 's or s) is an acronym
elif word.find('\'') == -1 and word[-1] != 's':
acronym_lexicon = ""
for l in word:
acronym_lexicon = acronym_lexicon + dict_letter[l.upper()] + " "
if acronym_lexicon.strip() == lexicon:
acronym_mapped = ""
for l in word[:-1]:
acronym_mapped = acronym_mapped + l.lower() + '._'
acronym_mapped = acronym_mapped + word[-1].lower() + '.'
fout_map.write(word + '\t' + acronym_mapped + '\n')
fout_lex.write(acronym_mapped + ' ' + lexicon + '\n')
fout_lex_ori.write(word + ' ' + lexicon + '\n')
else:
continue
else:
continue
| 42.247619
| 93
| 0.548242
|