Dataset schema (column name: type and observed range; "nullable" marks columns that may be null). Each record below lists these metadata fields in this order on a single line, followed by the file content and a closing line with the three derived statistics:

hexsha: string (length 40) | size: int64 (3 to 1.03M) | ext: string (10 distinct values) | lang: string (1 distinct value) |
max_stars_repo_path: string (length 3 to 972) | max_stars_repo_name: string (length 6 to 130) | max_stars_repo_head_hexsha: string (length 40 to 78) | max_stars_repo_licenses: list (length 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (length 24, nullable) | max_stars_repo_stars_event_max_datetime: string (length 24, nullable) |
max_issues_repo_path: string (length 3 to 972) | max_issues_repo_name: string (length 6 to 130) | max_issues_repo_head_hexsha: string (length 40 to 78) | max_issues_repo_licenses: list (length 1 to 10) | max_issues_count: int64 (1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (length 24, nullable) | max_issues_repo_issues_event_max_datetime: string (length 24, nullable) |
max_forks_repo_path: string (length 3 to 972) | max_forks_repo_name: string (length 6 to 130) | max_forks_repo_head_hexsha: string (length 40 to 78) | max_forks_repo_licenses: list (length 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (length 24, nullable) | max_forks_repo_forks_event_max_datetime: string (length 24, nullable) |
content: string (length 3 to 1.03M) | avg_line_length: float64 (1.13 to 941k) | max_line_length: int64 (2 to 941k) | alphanum_fraction: float64 (0 to 1) |
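The three trailing numeric columns look like simple statistics derived from content. Below is a minimal sketch of how such values could be computed from a file's text; the exact definitions used when the dataset was built are an assumption here.

def content_stats(content):
    # Assumed definitions: mean and maximum line length, plus the fraction of
    # alphanumeric characters over all characters in the file.
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / len(lines) if lines else 0.0
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(c.isalnum() for c in content) / len(content) if content else 0.0
    return avg_line_length, max_line_length, alphanum_fraction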
3774ba19f157b344cd70d5ac85ec575d59405bb7 | 2,833 | py | Python | examples/dfp/v201702/report_service/run_inventory_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | ["Apache-2.0"] | null | null | null | examples/dfp/v201702/report_service/run_inventory_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | ["Apache-2.0"] | null | null | null | examples/dfp/v201702/report_service/run_inventory_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a report equal to the "Whole network report" on the DFP website."""
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
from googleads import errors
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201702')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201702')
# Get root ad unit id for network.
root_ad_unit_id = (
network_service.getCurrentNetwork()['effectiveRootAdUnitId'])
# Set filter statement and bind value for reportQuery.
values = [{
'key': 'parent_ad_unit_id',
'value': {
'xsi_type': 'NumberValue',
'value': root_ad_unit_id
}
}]
filter_statement = {'query': 'WHERE PARENT_AD_UNIT_ID = :parent_ad_unit_id',
'values': values}
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['DATE', 'AD_UNIT_NAME'],
'adUnitView': 'HIERARCHICAL',
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS',
'DYNAMIC_ALLOCATION_INVENTORY_LEVEL_IMPRESSIONS',
'DYNAMIC_ALLOCATION_INVENTORY_LEVEL_CLICKS',
'TOTAL_INVENTORY_LEVEL_IMPRESSIONS',
'TOTAL_INVENTORY_LEVEL_CPM_AND_CPC_REVENUE'],
'dateRangeType': 'LAST_WEEK',
'statement': filter_statement
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.DfpReportError as e:
print('Failed to generate report. Error was: %s' % e)
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print('Report job with id \'%s\' downloaded to:\n%s' % (
report_job_id, report_file.name))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| 32.563218 | 78 | 0.692199 |

4725aa57633875516732762251a5fe4ae760ba93 | 1,106 | py | Python | core-python/Core_Python/datatstructure/Assessment_4_7.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 9 | 2020-04-23T05:24:19.000Z | 2022-02-17T16:37:51.000Z | core-python/Core_Python/datatstructure/Assessment_4_7.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 5 | 2020-10-01T05:08:37.000Z | 2020-10-12T03:18:10.000Z | core-python/Core_Python/datatstructure/Assessment_4_7.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 9 | 2020-04-28T14:06:41.000Z | 2021-10-19T18:32:28.000Z |
''' Use a dictionary to count the frequency of letters in the input string.
Only letters should be counted, not blank spaces, numbers, or
punctuation. Upper case should be considered the same as lower case.
For example, count_letters("This is a sentence.") should return
{'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}. '''
def count_letters(text):
result = {}
# Go through each letter in the text
for letter in text:
# Check if the letter needs to be counted or not
if letter.upper() >='A' and letter.upper() <= 'Z':
if letter.lower() not in result:
result[letter.lower()] = 0
# Add or increment the value in the dictionary
result[letter.lower()] += 1
return result
print(count_letters("AaBbCc"))
# Should be {'a': 2, 'b': 2, 'c': 2}
print(count_letters("Math is fun! 2+2=4"))
# Should be {'m': 1, 'a': 1, 't': 1, 'h': 1, 'i': 1, 's': 1, 'f': 1, 'u': 1, 'n': 1}
print(count_letters("This is a sentence."))
# Should be {'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}
| 39.5 | 84 | 0.564195 |

ddfce00c8a091c05a8aa9e492cb1a68fba16b364 | 4,240 | py | Python | strativ/strativ/settings.py | AR-Ashraf/Django-REST-API-Website | 1907fd30ebb66accb1c102e5a10ba506bff0b4d4 | ["MIT"] | null | null | null | strativ/strativ/settings.py | AR-Ashraf/Django-REST-API-Website | 1907fd30ebb66accb1c102e5a10ba506bff0b4d4 | ["MIT"] | null | null | null | strativ/strativ/settings.py | AR-Ashraf/Django-REST-API-Website | 1907fd30ebb66accb1c102e5a10ba506bff0b4d4 | ["MIT"] | null | null | null |
"""
Django settings for strativ project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e@0tfwa1109_!5)tq*e9h$myq892f#-szi8h8$&b_9isb67lmz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apiapp.apps.ApiappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'accounts.apps.AccountsConfig',
]
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': (
#
# 'rest_framework.authentication.SessionAuthentication',
# ),
# 'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.IsAuthenticated',
# 'rest_framework.authentication.TokenAuthentication',
# ),
# 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
# 'PAGE_SIZE': 10,
# }
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
#'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'PAGE_SIZE': 10
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'strativ.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'strativ.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 27.532468 | 91 | 0.701651 |

ce0228f45b083ed0f9251a80940f7ca7f203c118 | 808 | py | Python | manage.py | George-Stephen/Enrollment_api | a7030e9988da68107b434a1ecf810eab4a4fceca | ["Apache-2.0"] | null | null | null | manage.py | George-Stephen/Enrollment_api | a7030e9988da68107b434a1ecf810eab4a4fceca | ["Apache-2.0"] | null | null | null | manage.py | George-Stephen/Enrollment_api | a7030e9988da68107b434a1ecf810eab4a4fceca | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "enrollment.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.130435 | 77 | 0.643564 |

2e4239bc7ceef910bfd7a5967214f81f1dfd5d09 | 4,172 | py | Python | mmocr/models/textdet/necks/fpn_cat.py | yefangok/mmocr | ee185c17d6097c4a9ffeaec41f9cb1d271568074 | ["Apache-2.0"] | null | null | null | mmocr/models/textdet/necks/fpn_cat.py | yefangok/mmocr | ee185c17d6097c4a9ffeaec41f9cb1d271568074 | ["Apache-2.0"] | null | null | null | mmocr/models/textdet/necks/fpn_cat.py | yefangok/mmocr | ee185c17d6097c4a9ffeaec41f9cb1d271568074 | ["Apache-2.0"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import auto_fp16
from mmdet.models.builder import NECKS
@NECKS.register_module()
class FPNC(nn.Module):
"""FPN-like fusion module in Real-time Scene Text Detection with
Differentiable Binarization.
This was partially adapted from https://github.com/MhLiao/DB and
https://github.com/WenmuZhou/DBNet.pytorch
"""
def __init__(self,
in_channels,
lateral_channels=256,
out_channels=64,
bias_on_lateral=False,
bn_re_on_lateral=False,
bias_on_smooth=False,
bn_re_on_smooth=False,
conv_after_concat=False):
super().__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.lateral_channels = lateral_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.bn_re_on_lateral = bn_re_on_lateral
self.bn_re_on_smooth = bn_re_on_smooth
self.conv_after_concat = conv_after_concat
self.lateral_convs = nn.ModuleList()
self.smooth_convs = nn.ModuleList()
self.num_outs = self.num_ins
for i in range(self.num_ins):
norm_cfg = None
act_cfg = None
if self.bn_re_on_lateral:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
l_conv = ConvModule(
in_channels[i],
lateral_channels,
1,
bias=bias_on_lateral,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
norm_cfg = None
act_cfg = None
if self.bn_re_on_smooth:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
smooth_conv = ConvModule(
lateral_channels,
out_channels,
3,
bias=bias_on_smooth,
padding=1,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.smooth_convs.append(smooth_conv)
if self.conv_after_concat:
norm_cfg = dict(type='BN')
act_cfg = dict(type='ReLU')
self.out_conv = ConvModule(
out_channels * self.num_outs,
out_channels * self.num_outs,
3,
padding=1,
conv_cfg=None,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of FPN module."""
for m in self.lateral_convs:
m.init_weights()
for m in self.smooth_convs:
m.init_weights()
if self.conv_after_concat:
self.out_conv.init_weights()
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
# build top-down path
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
outs = [
self.smooth_convs[i](laterals[i])
for i in range(used_backbone_levels)
]
for i, out in enumerate(outs):
outs[i] = F.interpolate(
outs[i], size=outs[0].shape[2:], mode='nearest')
out = torch.cat(outs, dim=1)
if self.conv_after_concat:
out = self.out_conv(out)
return out
| 32.59375 | 68 | 0.551534 |

7ea0666b12b0c1a0364e08da9dc54e7b02b7d7a8 | 985 | py | Python | var/spack/repos/builtin/packages/biobloom/package.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/biobloom/package.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/biobloom/package.py | FJ-NaokiMatsumura/spack | 7cfe626e21795f0a4bfe61f36ca1b48ffd2fc961 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Biobloom(AutotoolsPackage):
"""BioBloom Tools (BBT) provides the means to create filters for a given
reference and then to categorize sequences."""
homepage = "https://github.com/bcgsc/biobloom"
url = "https://github.com/bcgsc/biobloom/releases/download/2.2.0/biobloomtools-2.2.0.tar.gz"
version('2.2.0', sha256='5d09f8690f0b6402f967ac09c5b0f769961f3fe3791000f8f73af6af7324f02c')
depends_on('boost+exception+math+serialization+container')
depends_on('sdsl-lite')
depends_on('sparsehash')
depends_on('zlib')
def configure_args(self):
# newer versions of sdsl-lite introduce tolerable warnings
# they must be disabled to allow the build to continue
return ['CXXFLAGS=-w', 'CPPFLAGS=-w']
| 35.178571 | 101 | 0.723858 |

97c0714e4f8a7a6e18384b1c252853a7e912642f | 531 | py | Python | src/models/__init__.py | sameasy/hgcal_ldrd | 7775f2346b9f6764ca30b134c1aeceb2e8ce4848 | ["BSD-3-Clause"] | 9 | 2019-03-13T15:37:07.000Z | 2021-07-22T01:55:30.000Z | src/models/__init__.py | sameasy/hgcal_ldrd | 7775f2346b9f6764ca30b134c1aeceb2e8ce4848 | ["BSD-3-Clause"] | null | null | null | src/models/__init__.py | sameasy/hgcal_ldrd | 7775f2346b9f6764ca30b134c1aeceb2e8ce4848 | ["BSD-3-Clause"] | 9 | 2019-02-25T19:35:48.000Z | 2020-02-24T22:53:04.000Z |
"""
Python module for holding our PyTorch models.
"""
from .EdgeNet import EdgeNet
from .gnn_geometric import GNNSegmentClassifier
from .PointNet import PointNet
_models = {'EdgeNet': EdgeNet,
'heptrkx_segment_classifier': GNNSegmentClassifier,
'PointNet': PointNet}
def get_model(name, **model_args):
"""
Top-level factory function for getting your models.
"""
if name in _models:
return _models[name](**model_args)
else:
raise Exception('Model %s unknown' % name)
| 25.285714 | 62 | 0.677966 |

95edda7547b0c88148f63e91cfaeb36685561e61 | 3,562 | py | Python | Imaging/Core/Testing/Python/ResliceBSpline.py | txwhhny/vtk | 854d9aa87b944bc9079510515996406b98b86f7c | ["BSD-3-Clause"] | 1,755 | 2015-01-03T06:55:00.000Z | 2022-03-29T05:23:26.000Z | Imaging/Core/Testing/Python/ResliceBSpline.py | txwhhny/vtk | 854d9aa87b944bc9079510515996406b98b86f7c | ["BSD-3-Clause"] | 29 | 2015-04-23T20:58:30.000Z | 2022-03-02T16:16:42.000Z | Imaging/Core/Testing/Python/ResliceBSpline.py | txwhhny/vtk | 854d9aa87b944bc9079510515996406b98b86f7c | ["BSD-3-Clause"] | 1,044 | 2015-01-05T22:48:27.000Z | 2022-03-31T02:38:26.000Z |
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different interpolation modes,
# with the wrap-pad feature turned on and with a rotation
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
transform = vtk.vtkTransform()
# rotate about the center of the image
transform.Translate(+100.8,+100.8,+69.0)
transform.RotateWXYZ(10,1,1,0)
transform.Translate(-100.8,-100.8,-69.0)
bspline3 = vtk.vtkImageBSplineInterpolator()
bspline3.SetSplineDegree(3)
bspline9 = vtk.vtkImageBSplineInterpolator()
bspline9.SetSplineDegree(9)
coeffs1 = vtk.vtkImageBSplineCoefficients()
coeffs1.SetInputConnection(reader.GetOutputPort())
coeffs1.SetSplineDegree(3)
coeffs2 = vtk.vtkImageBSplineCoefficients()
coeffs2.SetInputConnection(reader.GetOutputPort())
coeffs2.SetSplineDegree(9)
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(coeffs1.GetOutputPort())
reslice1.SetResliceTransform(transform)
reslice1.SetInterpolator(bspline3)
reslice1.SetOutputSpacing(2.0,2.0,1.5)
reslice1.SetOutputOrigin(-32,-32,40)
reslice1.SetOutputExtent(0,127,0,127,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(coeffs1.GetOutputPort())
reslice2.SetInterpolator(bspline3)
reslice2.SetOutputSpacing(2.0,2.0,1.5)
reslice2.SetOutputOrigin(-32,-32,40)
reslice2.SetOutputExtent(0,127,0,127,0,0)
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(coeffs2.GetOutputPort())
reslice3.SetResliceTransform(transform)
reslice3.SetInterpolator(bspline9)
reslice3.SetOutputSpacing(2.0,2.0,1.5)
reslice3.SetOutputOrigin(-32,-32,40)
reslice3.SetOutputExtent(0,127,0,127,0,0)
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(coeffs2.GetOutputPort())
reslice4.SetInterpolator(bspline9)
reslice4.SetOutputSpacing(2.0,2.0,1.5)
reslice4.SetOutputOrigin(-32,-32,40)
reslice4.SetOutputExtent(0,127,0,127,0,0)
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(256,256)
imgWin.Render()
# --- end of script --
| 33.92381 | 71 | 0.8105 |

f08b275d957d08491fe35f6e62b31331c764ee59 | 485 | py | Python | data/sr_utils.py | crisbodnar/cwn | 31e40d839d9996ef1c1d46931702ca5a69dbe2ce | ["MIT"] | 74 | 2021-07-30T08:36:58.000Z | 2022-03-27T19:50:49.000Z | data/sr_utils.py | crisbodnar/cwn | 31e40d839d9996ef1c1d46931702ca5a69dbe2ce | ["MIT"] | 14 | 2022-01-02T11:22:00.000Z | 2022-01-06T19:32:34.000Z | data/sr_utils.py | crisbodnar/cwn | 31e40d839d9996ef1c1d46931702ca5a69dbe2ce | ["MIT"] | 13 | 2021-08-05T15:26:06.000Z | 2022-02-19T06:59:15.000Z |
import networkx as nx
import torch
from torch_geometric.utils import to_undirected
def load_sr_dataset(path):
"""Load the Strongly Regular Graph Dataset from the supplied path."""
nx_graphs = nx.read_graph6(path)
graphs = list()
for nx_graph in nx_graphs:
n = nx_graph.number_of_nodes()
edge_index = to_undirected(torch.tensor(list(nx_graph.edges()), dtype=torch.long).transpose(1,0))
graphs.append((edge_index, n))
return graphs
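# Illustrative call (file name is hypothetical, not part of the original file):
# graphs = load_sr_dataset("sr251256.g6")
# graphs[0] is then a tuple (edge_index, num_nodes) for the first strongly regular graph.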
| 30.3125 | 105 | 0.701031 |

7ea5b07daa5045952657982398c624000e09f98d | 2,966 | py | Python | NEMbox/cmd_parser.py | wangjianyuan10/musicbox | f182053b07badc5d34190aeea85ff38d364a164e | ["MIT"] | 2 | 2020-03-21T15:20:28.000Z | 2020-04-16T07:22:46.000Z | NEMbox/cmd_parser.py | e71828/musicbox | f182053b07badc5d34190aeea85ff38d364a164e | ["MIT"] | null | null | null | NEMbox/cmd_parser.py | e71828/musicbox | f182053b07badc5d34190aeea85ff38d364a164e | ["MIT"] | 1 | 2020-06-10T09:22:38.000Z | 2020-06-10T09:22:38.000Z |
#!/usr/bin/env python
# coding=utf-8
# __author__='walker'
"""
Capture a curses-like keyboard input stream and turn it into a command stream.
"""
from copy import deepcopy
from functools import wraps
import os
ERASE_SPEED = 5 # the screen refreshes every 5 seconds to clear erroneous output
__all__ = ['cmd_parser', 'parse_keylist', 'coroutine', 'erase_coroutine']
def coroutine(func):
@wraps(func)
def primer(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return primer
def _cmd_parser():
'''
A generator that receives key values typed by the user and returns the collected key list.
It consumes the keyboard input stream, emits a command stream, and terminates on the curses default value -1.
'''
pre_key = -1
keylist = []
while 1:
key = yield
if key*pre_key < 0 and key > pre_key:
temp_pre_key = key
keylist.append(key)
elif key*pre_key > 0 and key+pre_key > 0:
temp_pre_key = key
keylist.append(key)
elif key*pre_key < 0 and key < pre_key:
temp_pre_key = key
return keylist
pre_key = key
def cmd_parser(results):
'''
A generator manager that catches StopIteration and starts a new generator.
It gracefully hides the inner generator's termination signal and restarts the generator.
'''
while 1:
results.clear()
results += yield from _cmd_parser()
yield results
def _erase_coroutine():
keylist = []
while 1:
key = yield
keylist.append(key)
if len(set(keylist)) > 1:
return keylist
elif len(keylist) >= ERASE_SPEED*2:
return keylist
def erase_coroutine(erase_cmd_list):
while 1:
erase_cmd_list.clear()
erase_cmd_list += yield from _erase_coroutine()
yield erase_cmd_list
def parse_keylist(keylist):
"""
'2' '3' '4' 'j' ----> 234 j
support keys [ ] j k <KEY_UP> <KEY_DOWN>
"""
keylist = deepcopy(keylist)
if keylist == []:
return None
tail_cmd = keylist.pop()
if tail_cmd in range(48, 58) and (set(keylist) | set(range(48, 58))) \
== set(range(48, 58)):
return int(''.join([chr(i) for i in keylist]+[chr(tail_cmd)]))
if len(keylist) == 0:
return (0, tail_cmd)
if tail_cmd in (ord('['), ord(']'), ord('j'), ord('k'), 258, 259) and \
max(keylist) <= 57 and min(keylist) >= 48:
return (int(''.join([chr(i) for i in keylist])), tail_cmd)
return None
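# Illustrative examples (not part of the original file), following the docstring above:
# parse_keylist([ord('2'), ord('3'), ord('j')]) -> (23, ord('j'))
# parse_keylist([ord('2'), ord('3')]) -> 23
# parse_keylist([]) -> None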
def main(data):
'''
test code
'''
results = []
group = cmd_parser(results)
next(group)
for i in data:
group.send(i)
group.send(-1)
print(results)
next(group)
for i in data:
group.send(i)
group.send(-1)
print(results)
x = _cmd_parser()
print('-----------')
print(x.send(None))
print(x.send(1))
print(x.send(2))
print(x.send(3))
print(x.send(3))
print(x.send(3))
try:
print(x.send(-1))
except Exception as e:
print(e.value)
if __name__ == '__main__':
main(list(range(1, 12,)[::-1]))
| 22.641221 | 80 | 0.564734 |

4ce96b38c283059864453f2586fb8b929ca48b14 | 493 | py | Python | src/requirementslib/__init__.py | stewartmiles/requirementslib | 644198471362e5e7be7a32d2a9fdadd538391aef | ["MIT"] | 72 | 2018-06-20T07:39:24.000Z | 2022-02-23T16:10:51.000Z | src/requirementslib/__init__.py | stewartmiles/requirementslib | 644198471362e5e7be7a32d2a9fdadd538391aef | ["MIT"] | 234 | 2018-06-03T06:27:28.000Z | 2022-03-31T11:37:59.000Z | src/requirementslib/__init__.py | stewartmiles/requirementslib | 644198471362e5e7be7a32d2a9fdadd538391aef | ["MIT"] | 25 | 2018-06-03T06:12:32.000Z | 2022-03-09T12:15:23.000Z |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import warnings
from vistir.compat import ResourceWarning
from .models.lockfile import Lockfile
from .models.pipfile import Pipfile
from .models.requirements import Requirement
__version__ = "1.5.17.dev0"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
warnings.filterwarnings("ignore", category=ResourceWarning)
__all__ = ["Lockfile", "Pipfile", "Requirement"]
| 22.409091 | 59 | 0.793103 |

6e284bdf86b25337c24f9964135cb17c9a0ddf6f | 284 | py | Python | allLongestStrings.py | pclumson1/Python3_Algorithms | 7b8106af1641aeb09b6bb1ac1881b8beecde1184 | ["MIT"] | null | null | null | allLongestStrings.py | pclumson1/Python3_Algorithms | 7b8106af1641aeb09b6bb1ac1881b8beecde1184 | ["MIT"] | null | null | null | allLongestStrings.py | pclumson1/Python3_Algorithms | 7b8106af1641aeb09b6bb1ac1881b8beecde1184 | ["MIT"] | null | null | null |
#allLongestStrings
def allLongestStrings(inputArray):
arr_len = [len(i) for i in inputArray]
long = max(arr_len)
strings = []
for i in inputArray[0:]:
if len(i) == long:
strings.append(i) # or else strings = strings + [i]
return strings
| 31.555556 | 64 | 0.598592 |

f9c4f3c0d5afe7d5be83b6223d1371fb726c524f | 13,413 | py | Python | WP3/Task3.1/notebooks/Q6_Distribution_References.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 2 | 2019-12-10T13:10:58.000Z | 2019-12-13T10:11:41.000Z | WP3/Task3.1/notebooks/Q6_Distribution_References.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 4 | 2020-03-31T12:13:45.000Z | 2020-04-15T15:59:17.000Z | WP3/Task3.1/notebooks/Q6_Distribution_References.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 1 | 2020-02-26T08:52:39.000Z | 2020-02-26T08:52:39.000Z |
#!/usr/bin/env python
# coding: utf-8
# # This will create plots for institutions of THE WUR universities only, for the period 2007-2017. The input dataset contains info for THE WUR universities only, but for any period of time.
# #### The unpaywall dump used was from (April or June) 2018; hence only analysis up to 2017 is included.
# ## Question : What is the distribution of references (outgoing) for open access articles vs subscription based articles in papers published by the university?
# In[1]:
# standard path wrangling to be able to import project config and sources
import os
import sys
from os.path import join
root = os.path.dirname(os.getcwd())
sys.path.append(root)
print('Project root: {}'.format(root))
# In[2]:
sys.path.append(join(root,"spark/shared/"))
from MAG_utils import *
# In[ ]:
# In[3]:
# Built-in
import json
# Installed
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc,rcParams
from matplotlib.patches import Rectangle
import unicodedata
import re
from statistics import mean
# In[4]:
cfg = None
with open(join(root,"spark/config.json")) as fp:
cfg = json.load(fp)
# In[5]:
# cfg
# In[6]:
cnames_for_plot = {
"austria" : "Austria",
"brazil" : "Brazil",
"germany" : "Germany",
"india" : "India",
"portugal" : "Portugal",
"russia" : "Russia",
"uk" : "UK",
"usa" : "USA"
}
# In[7]:
output_dir = join(root,"documents/analysis/dataset_selection_question6")
# In[8]:
# Create a new directory to save results
os.makedirs(output_dir)
# In[9]:
study_years = [2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017]
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# # Extraction of OA and unknown status counts for papers referenced by publications coming from each university.
# In[10]:
def get_univ_papers_references_counts(country_papers_OA_df, univs_name):
'''
Get the counts of OA and non-OA papers referenced by all publications from each university in the input country.
'''
univs_info = {}
univs_not_found = []
univs_found = []
for org_univ_name in set(univs_name): # remove duplicate univ names in the THE list, if any
# print(org_univ_name)
THE_univ_name_normalised = mag_normalisation_institution_names(org_univ_name)
'''
The dataframe that will be selected for the current univ is either :
1. When the MAG normalizedname column matches to THE_univ_name_normalised
or
2. When the MAG normalised(wikiname) matches to THE_univ_name_normalised -- this matches English names (in MAG wiki links as well as THE) of non English name (in MAG normalisedname or displayname) universities.
'''
univ_papers_df_set1 = country_papers_OA_df[country_papers_OA_df['normalizedname']==THE_univ_name_normalised]
univ_papers_df_set2 = country_papers_OA_df[country_papers_OA_df['normalizedwikiname']==THE_univ_name_normalised]
# The records in the two sets can be exactly the same
# Concat and remove exact duplicates -- https://stackoverflow.com/a/21317570/530399
univ_papers_df = pd.concat([univ_papers_df_set1, univ_papers_df_set2]).drop_duplicates().reset_index(drop=True)
# Put additional criteria that these papers are from 2007 till 2017
univ_papers_df = univ_papers_df[univ_papers_df['year'].isin(study_years)]
# Same paper will have multiple entries if there are multiple authors for that paper from same university.
# This is not necessary because the input dataset was already prepared to exclude such duplicates.
# univ_papers_df = univ_papers_df.drop_duplicates(subset="paperid")
count_total_univ_papers = len(univ_papers_df)
# For universities whose name could not be matched, it is not fair to say their OA count is 0; they should be excluded from the graph.
if count_total_univ_papers==0:
univs_not_found.append(org_univ_name+" @ "+THE_univ_name_normalised)
else:
univs_found.append(org_univ_name)
univs_info[org_univ_name] = {}
# int casting needed to convert numpy int (json-incompatible) to python int
count_OA_univ_referenced_papers = int(univ_papers_df['count_OA_references'].sum())
count_unknown_univ_referenced_papers = int(univ_papers_df['count_unknown_references'].sum())
count_total_univ_referenced_papers = count_OA_univ_referenced_papers + count_unknown_univ_referenced_papers
univ_oa_references_percent = (count_OA_univ_referenced_papers*100.00)/count_total_univ_referenced_papers
univ_other_references_percent = (count_unknown_univ_referenced_papers*100.00)/count_total_univ_referenced_papers
univs_info[org_univ_name]["count_OA_referenced_papers"] = count_OA_univ_referenced_papers
univs_info[org_univ_name]["percent_OA_referenced_papers"] = univ_oa_references_percent
univs_info[org_univ_name]["count_unknown_referenced_papers"] = count_unknown_univ_referenced_papers
univs_info[org_univ_name]["percent_unknown_referenced_papers"] = univ_other_references_percent
univs_info[org_univ_name]["count_total_referenced_papers"] = count_total_univ_referenced_papers
return univs_info, univs_not_found, univs_found
# In[11]:
all_countries_all_univs_OA_info = {}
all_countries_univs_found_not_found = {}
for country_name,univs_name in cfg['data']['all_THE_WUR_institutions_by_country'].items():
print("\nProcessing dataset of univs in "+country_name+"\n")
all_countries_univs_found_not_found[country_name] = {}
# CSV has repeated header from multiple partitions of the merge on pyspark csv output. Hence need to treat as string.
country_papers_OA_df = pd.read_csv(join(root,"data/processed/rc_oa_"+country_name+"_papers.csv"), header=0, sep=",", dtype={"year": object, "wikipage": object, "normalizedwikiname": object, 'count_OA_references': object, "count_unknown_references": object}) # object means string
# Then eliminate problematic lines
# temp fix until spark csv merge header issue is resolved -- the header line is present in each re-partition's output csv
country_papers_OA_df.drop(country_papers_OA_df[country_papers_OA_df.paperid == "paperid"].index, inplace=True)
# Then reset dtypes as needed.
country_papers_OA_df = country_papers_OA_df.astype({'year':int})
country_papers_OA_df = country_papers_OA_df.astype({'count_OA_references':int})
country_papers_OA_df = country_papers_OA_df.astype({'count_unknown_references':int})
univs_info, univs_not_found, univs_found = get_univ_papers_references_counts(country_papers_OA_df, univs_name)
all_countries_all_univs_OA_info[country_name] = univs_info
count_total_univs = len(univs_not_found) + len(univs_found)
not_found_details = {}
not_found_details['univ_names'] = univs_not_found
not_found_details['count_univs'] = len(univs_not_found)
not_found_details['percent_univs'] = (len(univs_not_found)*100.00)/count_total_univs
found_details = {}
found_details['univ_names'] = univs_found
found_details['count_univs'] = len(univs_found)
found_details['percent_univs'] = (len(univs_found)*100.00)/count_total_univs
all_details = {}
all_details['count_univs'] = count_total_univs
all_countries_univs_found_not_found[country_name]['not_found'] = not_found_details
all_countries_univs_found_not_found[country_name]['found'] = found_details
all_countries_univs_found_not_found[country_name]['all'] = all_details
print("Computed references counts for all univs in "+country_name+"\n")
# In[12]:
# Write text files with the infos
with open(join(output_dir,'all_countries_univs_found_not_found.txt'), 'w') as file:
file.write(json.dumps(all_countries_univs_found_not_found, sort_keys=True, indent=4, ensure_ascii=False))
with open(join(output_dir,'all_countries_all_univs_rc_info.txt'), 'w') as file:
file.write(json.dumps(all_countries_all_univs_OA_info, sort_keys=True, indent=4, ensure_ascii=False))
# In[ ]:
# # Load data from previously saved files
# In[13]:
with open(join(output_dir,'all_countries_all_univs_rc_info.txt')) as file:
all_countries_all_univs_OA_info = json.load(file)
# all_countries_all_univs_OA_info
# # Create bar plot for each of the countries
# In[14]:
def label_bar_with_value(ax, rects, value_labels):
"""
Attach a text label above each bar displaying its height
"""
for i in range(len(rects)):
rect = rects[i]
label_value = value_labels[i]
ax.text(rect.get_x() + rect.get_width()/2., 1.05*rect.get_height(),
'%s' % label_value,
ha='center', va='bottom')
def create_reference_count_distribution_bar_chart(univs_details, save_fname, x_label, save_file=True):
# https://chrisalbon.com/python/data_visualization/matplotlib_grouped_bar_plot/
# https://stackoverflow.com/a/42498711/530399
univs_name = [x for x in univs_details.keys()]
univs_data = univs_details.values()
univs_oa_reference_counts = [x['count_OA_referenced_papers'] for x in univs_data]
univs_unknown_reference_counts = [x['count_unknown_referenced_papers'] for x in univs_data]
raw_data = {'univs_name': univs_name,
'univs_oa_reference_counts': univs_oa_reference_counts,
'univs_unknown_reference_counts': univs_unknown_reference_counts
}
df = pd.DataFrame(raw_data, columns = ['univs_name', 'univs_oa_reference_counts', 'univs_unknown_reference_counts'])
# Compute proportion of univs_oa_reference_counts
df['proportion_univs_oa_reference_counts'] = (df['univs_oa_reference_counts'] / (df['univs_oa_reference_counts'] + df['univs_unknown_reference_counts'])) *100
# sort the df based on proportion of univs_oa_reference_counts
df = df.sort_values('proportion_univs_oa_reference_counts', ascending=False)[['univs_name', 'univs_oa_reference_counts','univs_unknown_reference_counts', 'proportion_univs_oa_reference_counts']]
# Setting the positions and width for the bars
pos = list(range(len(df['univs_name'])))
width = 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(25,10))
# Create a bar with oa_reference_count data,
# in position pos,
oa_reference_count_bars = ax.bar(pos,
#using df['univs_oa_reference_counts'] data,
df['univs_oa_reference_counts'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color='green',
)
# Set heights based on the percentages
oa_reference_counts_proportion_value_labels = [str(int(x))+"%" for x in df['proportion_univs_oa_reference_counts'].values.tolist()]
# Create a bar with unknown_reference_count data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
#using df['univs_unknown_reference_counts'] data,
df['univs_unknown_reference_counts'],
# of width
width,
# with alpha 0.5
alpha=0.5,
# with color
color='red',
)
# Set the y axis label
ax.set_ylabel('Outgoing Reference Counts')
# Set the x axis label
ax.set_xlabel(x_label)
# Set the position of the x ticks
ax.set_xticks([p + 0.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['univs_name'], rotation='vertical')
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
plt.ylim([0, max(df['univs_oa_reference_counts'] + df['univs_unknown_reference_counts'])] )
# Adding the legend and showing the plot
plt.legend(['OA reference Counts', 'Unknown reference Counts'], loc='upper left')
plt.grid()
label_bar_with_value(ax, oa_reference_count_bars, oa_reference_counts_proportion_value_labels)
if save_file:
plt.savefig(save_fname+".png", bbox_inches='tight', dpi=300)
plt.savefig(save_fname+".pdf", bbox_inches='tight', dpi=900)
plt.close()
return fig
# In[15]:
country_name = 'austria'
univs_details = all_countries_all_univs_OA_info[country_name]
create_reference_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+"_"+'referencescount_distribution'), x_label = ("Universities in "+cnames_for_plot[country_name]), save_file=False)
# In[16]:
for country_name, univs_details in all_countries_all_univs_OA_info.items():
create_reference_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+"_"+'referencescount_distribution'), x_label = ("Universities in "+cnames_for_plot[country_name]), save_file=True)
# In[ ]:
# In[17]:
print("\n\n\nCompleted!!!")
# In[ ]:
# In[ ]:
# In[ ]:
| 30.623288 | 285 | 0.702677 |

8ba5db3f50b6fcb4bb1b5d7c89960b976b01764f | 5,176 | py | Python | vertex/test/test_standalone.py | twisted/vertex | feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca | ["MIT"] | 56 | 2015-01-09T03:52:07.000Z | 2021-09-26T22:17:06.000Z | vertex/test/test_standalone.py | DalavanCloud/vertex | feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca | ["MIT"] | 34 | 2015-03-05T02:57:48.000Z | 2017-05-23T22:34:13.000Z | vertex/test/test_standalone.py | DalavanCloud/vertex | feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca | ["MIT"] | 17 | 2015-04-17T02:03:16.000Z | 2021-11-12T03:31:07.000Z |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{vertex.q2qstandalone}
"""
from pretend import call_recorder, call, stub
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.protocols.amp import AMP
from twisted.test.iosim import connect, makeFakeClient, makeFakeServer
from twisted.trial.unittest import TestCase, SynchronousTestCase
from vertex.q2q import Q2QAddress
from vertex.q2qadmin import AddUser, NotAllowed
from vertex.q2qstandalone import IdentityAdmin
from vertex.q2qstandalone import _UserStore
from vertex import ivertex
from zope.interface.verify import verifyObject
from ._fakes import _makeStubTxscrypt
class AddUserAdminTests(TestCase):
"""
Tests that IdentityAdmin can successfully add a user
"""
def setUp(self):
self.addUser = call_recorder(
lambda *args, **kwargs: defer.succeed("ignored")
)
store = stub(addUser=self.addUser)
self.adminFactory = stub(store=store)
def test_IdentityAdmin_responder_adds_user(self):
"""
L{IdentityAdmin} has a L{AddUser} responder.
"""
responder = IdentityAdmin().locateResponder(AddUser.commandName)
self.assertIsNotNone(responder)
def test_adds_user(self):
"""
When L{UserAdder} is connected to L{IdentityAdmin}, the L{AddUser}
command is called and L{IdentityAdmin} adds the user to its factory's
store.
"""
admin = IdentityAdmin()
admin.factory = self.adminFactory
serverTransport = makeFakeServer(admin)
serverTransport.getQ2QHost = lambda: Q2QAddress('Q2Q Host')
client = AMP()
pump = connect(admin, serverTransport, client, makeFakeClient(client))
d = client.callRemote(AddUser, name='q2q username',
password='q2q password')
pump.flush()
# The username and password are added, along with the domain=q2q
# host, to the IdentityAdmin's factory's store
self.assertEqual([call('Q2Q Host', 'q2q username', 'q2q password')],
self.addUser.calls)
# The server responds with {}
self.assertEqual({}, self.successResultOf(d))
class UserStoreTests(SynchronousTestCase):
"""
Tests for L{_UserStore}
"""
def setUp(self):
self.userPath = FilePath(self.mktemp())
self.userPath.makedirs()
self.addCleanup(self.userPath.remove)
self.makeUsers(self.userPath.path)
def makeUsers(self, path):
"""
Create a L{_UserStore} instance pointed at C{path}.
@param path: The path where the instance will store its
per-user files.
@type path: L{str}
"""
self.computeKeyReturns = defer.Deferred()
self.fakeTxscrypt = _makeStubTxscrypt(
computeKeyReturns=self.computeKeyReturns,
checkPasswordReturns=defer.Deferred(),
)
self.users = _UserStore(
path=path,
keyDeriver=self.fakeTxscrypt,
)
def test_providesIQ2QUserStore(self):
"""
The store provides L{ivertex.IQ2QUserStore}
"""
verifyObject(ivertex.IQ2QUserStore, self.users)
def assertStored(self, domain, username, password, key):
"""
Assert that C{password} is stored under C{user} and C{domain}.
@param domain: The user's 'domain.
@type domain: L{str}
@param username: The username.
@type username: L{str}
@param password: The password.
@type password: L{str}
@param key: The key "derived" from C{password}
@type key: L{str}
"""
storedDeferred = self.users.store(domain, username, password)
self.assertNoResult(storedDeferred)
self.computeKeyReturns.callback(key)
self.assertEqual(self.successResultOf(storedDeferred),
(domain, username))
def test_storeAndRetrieveKey(self):
"""
A key is derived for a password and stored under the domain
and user.
"""
domain, username, password, key = "domain", "user", "password", "key"
self.assertStored(domain, username, password, key)
self.assertEqual(self.users.key(domain, username), key)
def test_missingKey(self):
"""
The derived key for an unknown domain and user combination is
L{None}.
"""
self.assertIsNone(self.users.key("mystery domain", "mystery user"))
def test_storeExistingUser(self):
"""
Attempting to overwrite an existing user fails with
L{NotAllowed}
"""
domain, username, password, key = "domain", "user", "password", "key"
self.assertStored(domain, username, password, key)
self.makeUsers(self.userPath.path)
failure = self.failureResultOf(self.users.store(domain,
username,
password))
self.assertIsInstance(failure.value, NotAllowed)
| 29.747126 | 78 | 0.627512 |

bda16a828f3c4c702a9aa5748d4ca2d41b285036 | 167,967 | py | Python | python-package/lightgbm/basic.py | TremaMiguel/LightGBM | da9072fde2b5cd7160866e7e9e49a14e7fba8bf3 | ["MIT"] | null | null | null | python-package/lightgbm/basic.py | TremaMiguel/LightGBM | da9072fde2b5cd7160866e7e9e49a14e7fba8bf3 | ["MIT"] | null | null | null | python-package/lightgbm/basic.py | TremaMiguel/LightGBM | da9072fde2b5cd7160866e7e9e49a14e7fba8bf3 | ["MIT"] | null | null | null |
# coding: utf-8
"""Wrapper for C API of LightGBM."""
import abc
import ctypes
import json
import warnings
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
from os import SEEK_END
from os.path import getsize
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
import scipy.sparse
from .compat import PANDAS_INSTALLED, concat, dt_DataTable, pd_CategoricalDtype, pd_DataFrame, pd_Series
from .libpath import find_lib_path
ZERO_THRESHOLD = 1e-35
def _get_sample_count(total_nrow: int, params: str):
sample_cnt = ctypes.c_int(0)
_safe_call(_LIB.LGBM_GetSampleCount(
ctypes.c_int32(total_nrow),
c_str(params),
ctypes.byref(sample_cnt),
))
return sample_cnt.value
class _DummyLogger:
def info(self, msg: str) -> None:
print(msg)
def warning(self, msg: str) -> None:
warnings.warn(msg, stacklevel=3)
_LOGGER: Any = _DummyLogger()
_INFO_METHOD_NAME = "info"
_WARNING_METHOD_NAME = "warning"
def register_logger(
logger: Any, info_method_name: str = "info", warning_method_name: str = "warning"
) -> None:
"""Register custom logger.
Parameters
----------
logger : Any
Custom logger.
info_method_name : str, optional (default="info")
Method used to log info messages.
warning_method_name : str, optional (default="warning")
Method used to log warning messages.
"""
def _has_method(logger: Any, method_name: str) -> bool:
return callable(getattr(logger, method_name, None))
if not _has_method(logger, info_method_name) or not _has_method(logger, warning_method_name):
raise TypeError(
f"Logger must provide '{info_method_name}' and '{warning_method_name}' method"
)
global _LOGGER, _INFO_METHOD_NAME, _WARNING_METHOD_NAME
_LOGGER = logger
_INFO_METHOD_NAME = info_method_name
_WARNING_METHOD_NAME = warning_method_name
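# Illustrative usage (not part of the original file): route LightGBM's messages
# through the standard logging module.
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   register_logger(logging.getLogger("lightgbm"))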
def _normalize_native_string(func: Callable[[str], None]) -> Callable[[str], None]:
"""Join log messages from native library which come by chunks."""
msg_normalized: List[str] = []
@wraps(func)
def wrapper(msg: str) -> None:
nonlocal msg_normalized
if msg.strip() == '':
msg = ''.join(msg_normalized)
msg_normalized = []
return func(msg)
else:
msg_normalized.append(msg)
return wrapper
def _log_info(msg: str) -> None:
getattr(_LOGGER, _INFO_METHOD_NAME)(msg)
def _log_warning(msg: str) -> None:
getattr(_LOGGER, _WARNING_METHOD_NAME)(msg)
@_normalize_native_string
def _log_native(msg: str) -> None:
getattr(_LOGGER, _INFO_METHOD_NAME)(msg)
def _log_callback(msg: bytes) -> None:
"""Redirect logs from native library into Python."""
_log_native(str(msg.decode('utf-8')))
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8'))
return lib
_LIB = _load_lib()
NUMERIC_TYPES = (int, float, bool)
_ArrayLike = Union[List, np.ndarray, pd_Series]
def _safe_call(ret: int) -> None:
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_numpy_column_array(data):
"""Check whether data is a column numpy array."""
if not isinstance(data, np.ndarray):
return False
shape = data.shape
return len(shape) == 2 and shape[1] == 1
def cast_numpy_array_to_dtype(array, dtype):
"""Cast numpy array to given dtype."""
if array.dtype == dtype:
return array
return array.astype(dtype=dtype, copy=False)
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def _is_1d_collection(data: Any) -> bool:
"""Check whether data is a 1-D collection."""
return (
is_numpy_1d_array(data)
or is_numpy_column_array(data)
or is_1d_list(data)
or isinstance(data, pd_Series)
)
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
return cast_numpy_array_to_dtype(data, dtype)
elif is_numpy_column_array(data):
_log_warning('Converting column-vector to 1d array')
array = data.ravel()
return cast_numpy_array_to_dtype(array, dtype)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, pd_Series):
_check_for_bad_pandas_dtypes(data.to_frame().dtypes)
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list, numpy 1-D array or pandas Series")
def _is_numpy_2d_array(data: Any) -> bool:
"""Check whether data is a numpy 2-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 2 and data.shape[1] > 1
def _is_2d_list(data: Any) -> bool:
"""Check whether data is a 2-D list."""
return isinstance(data, list) and len(data) > 0 and is_1d_list(data[0])
def _is_2d_collection(data: Any) -> bool:
"""Check whether data is a 2-D collection."""
return (
_is_numpy_2d_array(data)
or _is_2d_list(data)
or isinstance(data, pd_DataFrame)
)
def _data_to_2d_numpy(data: Any, dtype: type = np.float32, name: str = 'list') -> np.ndarray:
"""Convert data to numpy 2-D array."""
if _is_numpy_2d_array(data):
return cast_numpy_array_to_dtype(data, dtype)
if _is_2d_list(data):
return np.array(data, dtype=dtype)
if isinstance(data, pd_DataFrame):
_check_for_bad_pandas_dtypes(data.dtypes)
return cast_numpy_array_to_dtype(data.values, dtype)
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list of lists, numpy 2-D array or pandas DataFrame")
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return f"[{','.join(map(str, x))}]"
else:
return str(x)
pairs.append(f"{key}={','.join(map(to_string, val))}")
elif isinstance(val, (str, Path, NUMERIC_TYPES)) or is_numeric(val):
pairs.append(f"{key}={val}")
elif val is not None:
raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}')
return ' '.join(pairs)
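# Illustrative example (not part of the original file):
# param_dict_to_str({'objective': 'binary', 'bagging_fraction': 0.8, 'ignore_column': [0, 3]})
# returns 'objective=binary bagging_fraction=0.8 ignore_column=0,3'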
class _TempFile:
"""Proxy class to workaround errors on Windows."""
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
self.path = Path(self.name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.path.is_file():
self.path.unlink()
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
class _ConfigAliases:
# lazy evaluation to allow import without dynamic library, e.g., for docs generation
aliases = None
@staticmethod
def _get_all_param_aliases() -> Dict[str, Set[str]]:
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_DumpParamAliases(
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_DumpParamAliases(
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
aliases = json.loads(
string_buffer.value.decode('utf-8'),
object_hook=lambda obj: {k: set(v) | {k} for k, v in obj.items()}
)
return aliases
@classmethod
def get(cls, *args) -> Set[str]:
if cls.aliases is None:
cls.aliases = cls._get_all_param_aliases()
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
@classmethod
def get_by_alias(cls, *args) -> Set[str]:
if cls.aliases is None:
cls.aliases = cls._get_all_param_aliases()
ret = set(args)
for arg in args:
for aliases in cls.aliases.values():
if arg in aliases:
ret |= aliases
break
return ret
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]:
"""Get a single parameter value, accounting for aliases.
Parameters
----------
main_param_name : str
Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``.
params : dict
Dictionary of LightGBM parameters.
default_value : Any
Default value to use for the parameter, if none is found in ``params``.
Returns
-------
params : dict
A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed.
If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred.
"""
# avoid side effects on passed-in parameters
params = deepcopy(params)
# find a value, and remove other aliases with .pop()
# prefer the value of 'main_param_name' if it exists, otherwise search the aliases
found_value = None
if main_param_name in params.keys():
found_value = params[main_param_name]
for param in _ConfigAliases.get(main_param_name):
val = params.pop(param, None)
if found_value is None and val is not None:
found_value = val
if found_value is not None:
params[main_param_name] = found_value
else:
params[main_param_name] = default_value
return params
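# Sketch of the alias-resolution contract above (alias names come from the native
# library, so this example is illustrative and not executed):
#
# >>> _choose_param_value("num_iterations", {"n_estimators": 50}, default_value=100)  # doctest: +SKIP
# {'num_iterations': 50}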
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
_log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
             "because it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError(f"Expected np.float32 or np.float64, got {data.dtype}")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data)  # return `data` too, so the temporary copy is not freed while the pointer is in use
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError(f"Expected np.int32 or np.int64, got {data.dtype}")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data)  # return `data` too, so the temporary copy is not freed while the pointer is in use
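# Minimal sketch of the shared contract of c_float_array / c_int_array: both return
# ``(pointer, C API type code, kept_array)``; the third element must stay referenced
# for as long as the pointer is used.
#
# >>> _, type_code, kept = c_float_array(np.array([1.0, 2.0], dtype=np.float32))
# >>> type_code == C_API_DTYPE_FLOAT32
# True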
def _check_for_bad_pandas_dtypes(pandas_dtypes_series):
float128 = getattr(np, 'float128', type(None))
def is_allowed_numpy_dtype(dtype):
return (
issubclass(dtype, (np.integer, np.floating, np.bool_))
and not issubclass(dtype, (np.timedelta64, float128))
)
bad_pandas_dtypes = [
f'{column_name}: {pandas_dtype}'
for column_name, pandas_dtype in pandas_dtypes_series.items()
if not is_allowed_numpy_dtype(pandas_dtype.type)
]
if bad_pandas_dtypes:
raise ValueError('pandas dtypes must be int, float or bool.\n'
f'Fields with bad pandas dtypes: {", ".join(bad_pandas_dtypes)}')
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, pd_DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2-dimensional and non-empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = [col for col, dtype in zip(data.columns, data.dtypes) if isinstance(dtype, pd_CategoricalDtype)]
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
_check_for_bad_pandas_dtypes(data.dtypes)
df_dtypes = [dtype.type for dtype in data.dtypes]
df_dtypes.append(np.float32) # so that the target dtype considers floats
target_dtype = np.find_common_type(df_dtypes, [])
data = data.astype(target_dtype, copy=False).values
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
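# Illustrative sketch of _data_from_pandas (requires pandas; not executed here):
# unordered categorical columns are replaced by their integer codes (missing -> NaN),
# the per-column category lists are stored in ``pandas_categorical`` so that a later
# validation DataFrame is re-encoded consistently, and the frame is converted to a
# plain float numpy array.
#
# >>> df = pd_DataFrame({"x": [1.0, 2.0], "c": ["a", "b"]}).astype({"c": "category"})  # doctest: +SKIP
# >>> arr, names, cats, pc = _data_from_pandas(df, "auto", "auto", None)  # doctest: +SKIP
# >>> (names, cats, pc)  # doctest: +SKIP
# (['x', 'c'], ['c'], [['a', 'b']])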
def _label_from_pandas(label):
if isinstance(label, pd_DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
_check_for_bad_pandas_dtypes(label.dtypes)
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy)
pandas_str = f'\npandas_categorical:{categorical_json}\n'
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = lines[-1].decode('utf-8').strip()
if not last_line.startswith(pandas_key):
last_line = lines[-2].decode('utf-8').strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
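# Sketch of the footer handled by the two helpers above: the pandas category lists
# are appended to a saved model as a single trailing line and recovered by scanning
# backwards for the "pandas_categorical:" key.
#
# >>> _dump_pandas_categorical([["a", "b"]])
# '\npandas_categorical:[["a", "b"]]\n'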
class Sequence(abc.ABC):
"""
Generic data access interface.
Object should support the following operations:
.. code-block::
# Get total row number.
>>> len(seq)
# Random access by row index. Used for data sampling.
>>> seq[10]
# Range data access. Used to read data in batch when constructing Dataset.
>>> seq[0:100]
# Optionally specify batch_size to control range data read size.
>>> seq.batch_size
- With random access, **data sampling does not need to go through all data**.
- With range data access, there is **no need to read all data into memory, which reduces memory usage**.
.. versionadded:: 3.3.0
Attributes
----------
batch_size : int
Default size of a batch.
"""
batch_size = 4096 # Defaults to read 4K rows in each batch.
@abc.abstractmethod
def __getitem__(self, idx: Union[int, slice, List[int]]) -> np.ndarray:
"""Return data for given row index.
A basic implementation should look like this:
.. code-block:: python
if isinstance(idx, numbers.Integral):
return self._get_one_line(idx)
elif isinstance(idx, slice):
return np.stack([self._get_one_line(i) for i in range(idx.start, idx.stop)])
elif isinstance(idx, list):
# Only required if using ``Dataset.subset()``.
return np.array([self._get_one_line(i) for i in idx])
else:
raise TypeError(f"Sequence index must be integer, slice or list, got {type(idx).__name__}")
Parameters
----------
idx : int, slice[int], list[int]
Item index.
Returns
-------
result : numpy 1-D array or numpy 2-D array
1-D array if idx is int, 2-D array if idx is slice or list.
"""
raise NotImplementedError("Sub-classes of lightgbm.Sequence must implement __getitem__()")
@abc.abstractmethod
def __len__(self) -> int:
"""Return row count of this sequence."""
raise NotImplementedError("Sub-classes of lightgbm.Sequence must implement __len__()")
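# A minimal concrete Sequence, sketching the interface documented above (the wrapped
# numpy array and class name are illustrative, not part of the library):
#
# class NumpySequence(Sequence):
#     def __init__(self, data, batch_size=4096):
#         self.data = data
#         self.batch_size = batch_size
#
#     def __getitem__(self, idx):
#         return self.data[idx]  # int -> 1-D row, slice/list -> 2-D block
#
#     def __len__(self):
#         return len(self.data)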
class _InnerPredictor:
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : str, pathlib.Path or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter: dict or None, optional (default=None)
Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(str(model_file)),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False):
"""Predict logic.
Parameters
----------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM).
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, (str, Path)):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(str(data)),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
preds = np.loadtxt(f.name, dtype=np.float64)
nrow = preds.shape[0]
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, dt_DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
_log_warning('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError(f'Cannot predict data for type {type(data).__name__}')
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
raise ValueError(f'Length of predict result ({preds.size}) cannot be divided by nrow ({nrow})')
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
raise LightGBMError('LightGBM cannot perform prediction for data '
f'with number of rows greater than MAX_INT32 ({MAX_INT32}).\n'
'You can split your data into chunks '
'and then concatenate predictions for them')
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.empty(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.empty(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.empty(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.empty(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.empty(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.empty(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.empty(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
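# Usage sketch for the internal predictor above (file name and data are hypothetical):
# given a saved model, _InnerPredictor produces the raw scores that Dataset uses as
# init_score for continued training.
#
# >>> predictor = _InnerPredictor(model_file="model.txt")  # doctest: +SKIP
# >>> init_score = predictor.predict(X, raw_score=True)  # doctest: +SKIP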
class Dataset:
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array
Data source of Dataset.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM) or a LightGBM Dataset binary file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance. Weights should be non-negative.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
Init score for Dataset.
feature_name : list of str, or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of str or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
Floating point numbers in categorical features will be rounded towards 0.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
self._start_row = 0 # Used when pushing rows one by one.
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def _create_sample_indices(self, total_nrow: int) -> np.ndarray:
"""Get an array of randomly chosen indices from this ``Dataset``.
Indices are sampled without replacement.
Parameters
----------
total_nrow : int
Total number of rows to sample from.
If this value is greater than the value of parameter ``bin_construct_sample_cnt``, only ``bin_construct_sample_cnt`` indices will be used.
If Dataset has multiple input data, this should be the sum of rows of every file.
Returns
-------
indices : numpy array
Indices for sampled data.
"""
param_str = param_dict_to_str(self.get_params())
sample_cnt = _get_sample_count(total_nrow, param_str)
indices = np.empty(sample_cnt, dtype=np.int32)
ptr_data, _, _ = c_int_array(indices)
actual_sample_cnt = ctypes.c_int32(0)
_safe_call(_LIB.LGBM_SampleIndices(
ctypes.c_int32(total_nrow),
c_str(param_str),
ptr_data,
ctypes.byref(actual_sample_cnt),
))
assert sample_cnt == actual_sample_cnt.value
return indices
def _init_from_ref_dataset(self, total_nrow: int, ref_dataset: 'Dataset') -> 'Dataset':
"""Create dataset from a reference dataset.
Parameters
----------
total_nrow : int
Number of rows expected to add to dataset.
ref_dataset : Dataset
Reference dataset to extract meta from.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateByReference(
ref_dataset,
ctypes.c_int64(total_nrow),
ctypes.byref(self.handle),
))
return self
def _init_from_sample(
self,
sample_data: List[np.ndarray],
sample_indices: List[np.ndarray],
sample_cnt: int,
total_nrow: int,
) -> "Dataset":
"""Create Dataset from sampled data structures.
Parameters
----------
sample_data : list of numpy array
Sample data for each column.
sample_indices : list of numpy array
Sample data row index for each column.
sample_cnt : int
Number of samples.
total_nrow : int
Total number of rows for all input files.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
ncol = len(sample_indices)
assert len(sample_data) == ncol, "#sample data column != #column indices"
for i in range(ncol):
if sample_data[i].dtype != np.double:
raise ValueError(f"sample_data[{i}] type {sample_data[i].dtype} is not double")
if sample_indices[i].dtype != np.int32:
raise ValueError(f"sample_indices[{i}] type {sample_indices[i].dtype} is not int32")
# c type: double**
# each double* element points to start of each column of sample data.
sample_col_ptr = (ctypes.POINTER(ctypes.c_double) * ncol)()
# c type int**
# each int* points to start of indices for each column
indices_col_ptr = (ctypes.POINTER(ctypes.c_int32) * ncol)()
for i in range(ncol):
sample_col_ptr[i] = c_float_array(sample_data[i])[0]
indices_col_ptr[i] = c_int_array(sample_indices[i])[0]
num_per_col = np.array([len(d) for d in sample_indices], dtype=np.int32)
num_per_col_ptr, _, _ = c_int_array(num_per_col)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.get_params())
_safe_call(_LIB.LGBM_DatasetCreateFromSampledColumn(
ctypes.cast(sample_col_ptr, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.cast(indices_col_ptr, ctypes.POINTER(ctypes.POINTER(ctypes.c_int32))),
ctypes.c_int32(ncol),
num_per_col_ptr,
ctypes.c_int32(sample_cnt),
ctypes.c_int32(total_nrow),
c_str(params_str),
ctypes.byref(self.handle),
))
return self
def _push_rows(self, data: np.ndarray) -> 'Dataset':
"""Add rows to Dataset.
Parameters
----------
data : numpy 1-D array
New data to add to the Dataset.
Returns
-------
self : Dataset
Dataset object.
"""
nrow, ncol = data.shape
data = data.reshape(data.size)
data_ptr, data_type, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetPushRows(
self.handle,
data_ptr,
data_type,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol),
ctypes.c_int32(self._start_row),
))
self._start_row += nrow
return self
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# no min_data, nthreads and verbose in this function
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"linear_tree",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"precise_float_parser",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, (str, Path)):
# check data has header or not
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header)
init_score = init_score.ravel()
if used_indices is not None:
assert not self.need_slice
if isinstance(data, (str, Path)):
sub_init_score = np.empty(num_data * predictor.num_class, dtype=np.float64)
assert num_data == len(used_indices)
for i in range(len(used_indices)):
for j in range(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.empty(init_score.size, dtype=np.float64)
for i in range(num_data):
for j in range(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float64)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
feature_name='auto', categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key in params.keys():
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, str) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, int):
categorical_indices.add(name)
else:
raise TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature")
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
# If the params[cat_alias] is equal to categorical_indices, do not report the warning.
if not (isinstance(params[cat_alias], list) and set(params[cat_alias]) == categorical_indices):
_log_warning(f'{cat_alias} in param dict is overridden.')
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, (str, Path)):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(str(data)),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0:
if all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif all(isinstance(x, Sequence) for x in data):
self.__init_from_seqs(data, ref_dataset)
else:
raise TypeError('Data list can only be of ndarray or Sequence')
elif isinstance(data, Sequence):
self.__init_from_seqs([data], ref_dataset)
elif isinstance(data, dt_DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}')
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
_log_warning("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError(f'Wrong predictor type {type(predictor).__name__}')
# set feature names
return self.set_feature_name(feature_name)
@staticmethod
def _yield_row_from_seqlist(seqs: List[Sequence], indices: Iterable[int]):
offset = 0
seq_id = 0
seq = seqs[seq_id]
for row_id in indices:
assert row_id >= offset, "sample indices are expected to be monotonic"
while row_id >= offset + len(seq):
offset += len(seq)
seq_id += 1
seq = seqs[seq_id]
id_in_seq = row_id - offset
row = seq[id_in_seq]
yield row if row.flags['OWNDATA'] else row.copy()
def __sample(self, seqs: List[Sequence], total_nrow: int) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Sample data from seqs.
Mimics behavior in c_api.cpp:LGBM_DatasetCreateFromMats()
Returns
-------
sampled_rows, sampled_row_indices
"""
indices = self._create_sample_indices(total_nrow)
# Select sampled rows, transpose to column order.
sampled = np.array([row for row in self._yield_row_from_seqlist(seqs, indices)])
sampled = sampled.T
filtered = []
filtered_idx = []
sampled_row_range = np.arange(len(indices), dtype=np.int32)
for col in sampled:
col_predicate = (np.abs(col) > ZERO_THRESHOLD) | np.isnan(col)
filtered_col = col[col_predicate]
filtered_row_idx = sampled_row_range[col_predicate]
filtered.append(filtered_col)
filtered_idx.append(filtered_row_idx)
return filtered, filtered_idx
def __init_from_seqs(self, seqs: List[Sequence], ref_dataset: Optional['Dataset'] = None):
"""
Initialize data from list of Sequence objects.
Sequence: Generic Data Access Object
Supports random access and access by batch if properly defined by user
Data scheme uniformity is trusted, not checked
"""
total_nrow = sum(len(seq) for seq in seqs)
# create validation dataset from ref_dataset
if ref_dataset is not None:
self._init_from_ref_dataset(total_nrow, ref_dataset)
else:
param_str = param_dict_to_str(self.get_params())
sample_cnt = _get_sample_count(total_nrow, param_str)
sample_data, col_indices = self.__sample(seqs, total_nrow)
self._init_from_sample(sample_data, col_indices, sample_cnt, total_nrow)
for seq in seqs:
nrow = len(seq)
batch_size = getattr(seq, 'batch_size', None) or Sequence.batch_size
for start in range(0, nrow, batch_size):
end = min(start + batch_size, nrow)
self._push_rows(seq[start:end])
return self
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.empty((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int32(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
@staticmethod
def _compare_params_for_warning(
params: Optional[Dict[str, Any]],
other_params: Optional[Dict[str, Any]],
ignore_keys: Set[str]
) -> bool:
"""Compare two dictionaries with params ignoring some keys.
It is only for the warning purpose.
Parameters
----------
params : dict or None
One dictionary with parameters to compare.
other_params : dict or None
Another dictionary with parameters to compare.
ignore_keys : set
Keys that should be ignored during comparing two dictionaries.
Returns
-------
compare_result : bool
Returns whether two dictionaries with params are equal.
"""
if params is None:
params = {}
if other_params is None:
other_params = {}
for k in other_params:
if k not in ignore_keys:
if k not in params or params[k] != other_params[k]:
return False
for k in params:
if k not in ignore_keys:
if k not in other_params or params[k] != other_params[k]:
return False
return True
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
params = self.get_params()
if params != reference_params:
if not self._compare_params_for_warning(
params=params,
other_params=reference_params,
ignore_keys=_ConfigAliases.get("categorical_feature")
):
_log_warning('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
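# Typical flow around construct()/create_valid() (sketch; the arrays are illustrative):
# the Dataset is built lazily, and validation data must reference the training Dataset
# so that both share the same bin mappers.
#
# >>> train = Dataset(X_train, label=y_train)  # doctest: +SKIP
# >>> valid = train.create_valid(X_valid, label=y_valid)  # doctest: +SKIP
# >>> train.construct()  # doctest: +SKIP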
def create_valid(self, data, label=None, weight=None, group=None, init_score=None, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array
Data source of Dataset.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM) or a LightGBM Dataset binary file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance. Weights should be non-negative.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
Init score for Dataset.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in the binary file.
If you need it, please set it again after loading Dataset.
Parameters
----------
filename : str or pathlib.Path
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(str(filename))))
return self
def _update_params(self, params):
if not params:
return self
params = deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : str
The field name of the information.
data : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None
The data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception(f"Cannot set {field_name} before constructing Dataset")
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
if field_name == 'init_score':
dtype = np.float64
if _is_1d_collection(data):
data = list_to_1d_numpy(data, dtype, name=field_name)
elif _is_2d_collection(data):
data = _data_to_2d_numpy(data, dtype, name=field_name)
data = data.ravel(order='F')
else:
raise TypeError(
'init_score must be list, numpy 1-D array or pandas Series.\n'
'In multiclass classification init_score can also be a list of lists, numpy 2-D array or pandas DataFrame.'
)
else:
dtype = np.int32 if field_name == 'group' else np.float32
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError(f"Expected np.float32/64 or np.int32, got {data.dtype}")
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : str
The field name of the information.
Returns
-------
info : numpy array or None
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception(f"Cannot get {field_name} before constructing Dataset")
tmp_out_len = ctypes.c_int(0)
out_type = ctypes.c_int(0)
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
arr = cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
arr = cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
arr = cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
if field_name == 'init_score':
num_data = self.num_data()
num_classes = arr.size // num_data
if num_classes > 1:
arr = arr.reshape((num_data, num_classes), order='F')
return arr
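# Sketch of the init_score round trip through set_field/get_field: for a multiclass
# task the scores are flattened in Fortran (column-major) order on write and reshaped
# back to (num_data, num_classes) on read (shapes below are illustrative):
#
# >>> ds.set_field("init_score", np.zeros((100, 3)))  # doctest: +SKIP
# >>> ds.get_field("init_score").shape  # doctest: +SKIP
# (100, 3)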
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or str
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
return self
else:
if self.categorical_feature != 'auto':
_log_warning('categorical_feature in Dataset is overridden.\n'
f'New categorical_feature is {sorted(list(categorical_feature))}')
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after raw data has been freed; "
                    "set free_raw_data=False when constructing the Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
It is not recommended for users to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after raw data has been freed; "
                    "set free_raw_data=False when constructing the Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
# we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after raw data has been freed; "
                    "set free_raw_data=False when constructing the Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of str
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match")
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point. Weights should be non-negative.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
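# Illustrative usage sketch: per-row weights are attached the same way as labels; here the
# assumed binary label `y` is used to up-weight the positive class.
#
#     w = np.where(y == 1, 2.0, 1.0)
#     ds = lgb.Dataset(X, label=y)
#     ds.set_weight(w)   # applied immediately if constructed, otherwise at construct() time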
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
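# Illustrative usage sketch for the learning-to-rank case described above: with 100
# documents split into 6 query groups, `group` holds group sizes (not boundaries).
# `X_rank` and `relevance` are assumed user-provided arrays.
#
#     ds = lgb.Dataset(X_rank, label=relevance)
#     ds.set_group([10, 20, 40, 10, 10, 10])   # sums to 100 == number of rows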
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list of str
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
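# Illustrative usage sketch: feature names can be overridden before construction and read
# back afterwards. `X` is assumed to be a 2-D array with three columns.
#
#     ds = lgb.Dataset(X, label=y)
#     ds.set_feature_name(['f_age', 'f_income', 'f_clicks'])
#     ds.construct().get_feature_name()   # ['f_age', 'f_income', 'f_clicks']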
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset. Weights should be non-negative.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, Sequence, list of Sequence or list of numpy array or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, pd_DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, dt_DataTable):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, Sequence):
self.data = self.data[self.used_indices]
elif isinstance(self.data, list) and len(self.data) > 0 and all(isinstance(x, Sequence) for x in self.data):
self.data = np.array([row for row in self._yield_row_from_seqlist(self.data, self.used_indices)])
else:
_log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n"
"Returning original raw data")
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
# group data returned by LightGBM are boundary offsets; convert them to group sizes
self.group = np.diff(self.group)
return self.group
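# Illustrative usage sketch: as the comment above notes, the C++ side stores group
# boundaries, so values read back from the library are converted to sizes via np.diff();
# group sizes set by the user are returned unchanged.
#
#     ds.set_group([10, 20, 40, 10, 10, 10])
#     ds.get_group()   # -> the same six group sizes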
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def feature_num_bin(self, feature: int) -> int:
"""Get the number of bins for a feature.
Parameters
----------
feature : int
Index of the feature.
Returns
-------
number_of_bins : int
The number of constructed bins for the feature in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetFeatureNumBin(self.handle,
ctypes.c_int(feature),
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get feature_num_bin before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with this Dataset ``r``, then goes to ``r.reference`` (if it exists),
then to ``r.reference.reference``, etc.,
until ``ref_limit`` Datasets have been collected or a reference loop is hit.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
was_none = self.data is None
old_self_data_type = type(self.data).__name__
if other.data is None:
self.data = None
elif self.data is not None:
if isinstance(self.data, np.ndarray):
if isinstance(other.data, np.ndarray):
self.data = np.hstack((self.data, other.data))
elif scipy.sparse.issparse(other.data):
self.data = np.hstack((self.data, other.data.toarray()))
elif isinstance(other.data, pd_DataFrame):
self.data = np.hstack((self.data, other.data.values))
elif isinstance(other.data, dt_DataTable):
self.data = np.hstack((self.data, other.data.to_numpy()))
else:
self.data = None
elif scipy.sparse.issparse(self.data):
sparse_format = self.data.getformat()
if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data):
self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format)
elif isinstance(other.data, pd_DataFrame):
self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format)
elif isinstance(other.data, dt_DataTable):
self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format)
else:
self.data = None
elif isinstance(self.data, pd_DataFrame):
if not PANDAS_INSTALLED:
raise LightGBMError("Cannot add features to DataFrame type of raw data "
"without pandas installed. "
"Install pandas and restart your session.")
if isinstance(other.data, np.ndarray):
self.data = concat((self.data, pd_DataFrame(other.data)),
axis=1, ignore_index=True)
elif scipy.sparse.issparse(other.data):
self.data = concat((self.data, pd_DataFrame(other.data.toarray())),
axis=1, ignore_index=True)
elif isinstance(other.data, pd_DataFrame):
self.data = concat((self.data, other.data),
axis=1, ignore_index=True)
elif isinstance(other.data, dt_DataTable):
self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())),
axis=1, ignore_index=True)
else:
self.data = None
elif isinstance(self.data, dt_DataTable):
if isinstance(other.data, np.ndarray):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data)))
elif scipy.sparse.issparse(other.data):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray())))
elif isinstance(other.data, pd_DataFrame):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values)))
elif isinstance(other.data, dt_DataTable):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy())))
else:
self.data = None
else:
self.data = None
if self.data is None:
err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to "
f"{old_self_data_type} type of raw data.\n")
err_msg += ("Set free_raw_data=False when construct Dataset to avoid this"
if was_none else "Freeing raw data")
_log_warning(err_msg)
self.feature_name = self.get_feature_name()
_log_warning("Reseting categorical features.\n"
"You can set new categorical features via ``set_categorical_feature`` method")
self.categorical_feature = "auto"
self.pandas_categorical = None
return self
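# Illustrative usage sketch: both Datasets must be constructed first, and keeping the raw
# data (free_raw_data=False) lets the raw matrices be merged as well. `X1` and `X2` are
# assumed arrays with the same number of rows.
#
#     d1 = lgb.Dataset(X1, label=y, free_raw_data=False).construct()
#     d2 = lgb.Dataset(X2, free_raw_data=False).construct()
#     d1.add_features_from(d2)   # d1 now has X1.shape[1] + X2.shape[1] features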
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : str or pathlib.Path
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(str(filename))))
return self
class Booster:
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : str, pathlib.Path or None, optional (default=None)
Path to the model file.
model_str : str or None, optional (default=None)
Model will be loaded from this string.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
params = _choose_param_value(
main_param_name="machines",
params=params,
default_value=None
)
# if "machines" is given, assume user wants to do distributed learning, and set up network
if params["machines"] is None:
params.pop("machines", None)
else:
machines = params["machines"]
if isinstance(machines, str):
num_machines_from_machine_list = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines_from_machine_list = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
params = _choose_param_value(
main_param_name="num_machines",
params=params,
default_value=num_machines_from_machine_list
)
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=12400
)
self.set_network(
machines=machines,
local_listen_port=params["local_listen_port"],
listen_time_out=params.get("time_out", 120),
num_machines=params["num_machines"]
)
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(str(model_file)),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str)
else:
raise TypeError('Need at least one training dataset or model file or model string '
'to create Booster instance')
self.params = params
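# Illustrative usage sketch: a Booster is created either for training (params + train_set)
# or for prediction (model_file / model_str). `X`, `y` and 'model.txt' are assumptions.
#
#     train_ds = lgb.Dataset(X, label=y)
#     bst = lgb.Booster(params={'objective': 'binary', 'verbose': -1}, train_set=train_ds)
#     bst_loaded = lgb.Booster(model_file='model.txt')   # prediction-only Booster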
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(
self,
machines: Union[List[str], Set[str], str],
local_listen_port: int = 12400,
listen_time_out: int = 120,
num_machines: int = 1
) -> "Booster":
"""Set the network configuration.
Parameters
----------
machines : list, set or str
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for distributed learning application.
Returns
-------
self : Booster
Booster with set network.
"""
if isinstance(machines, (list, set)):
machines = ','.join(machines)
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
The returned DataFrame has the following columns.
- ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree".
- ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc.
- ``node_index`` : str, unique identifier for a node.
- ``left_child`` : str, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes.
- ``right_child`` : str, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes.
- ``parent_index`` : str, ``node_index`` of this node's parent. ``None`` for the root node.
- ``split_feature`` : str, name of the feature used for splitting. ``None`` for leaf nodes.
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes.
- ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes.
- ``decision_type`` : str, logical operator describing how to compare a value to ``threshold``.
For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that
records where ``Column_10 <= 15`` follow the left side of the split and all other records follow the right side. ``None`` for leaf nodes.
- ``missing_direction`` : str, split direction that missing values should go to. ``None`` for leaf nodes.
- ``missing_type`` : str, describes what types of values are treated as missing.
- ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate.
- ``weight`` : float64 or int64, sum of Hessian (second-order derivative of objective), summed over observations that fall in this node.
- ``count`` : int64, number of records in the training data that fall into this node.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed. '
'You must install pandas and restart your session to use this method.')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = f'{tree_index}-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
# a single-node tree won't have `leaf_index`, so default to 0
node_num = tree.get('split_index' if is_split else 'leaf_index', 0)
return f"{tree_num}{node_type}{node_num}"
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
# "subtree_list" is a list of node records (dicts) for the subtree;
# append them after the current node's record.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return pd_DataFrame(model_list, columns=model_list[0].keys())
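# Illustrative usage sketch: with pandas installed, a trained Booster `bst` (assumed) can
# be inspected as a flat table of nodes.
#
#     df = bst.trees_to_dataframe()
#     df[df['tree_index'] == 0][['node_index', 'split_feature', 'threshold', 'value']]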
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : str
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : str
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
raise TypeError(f'Validation data should be Dataset instance, met {type(data).__name__}')
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : numpy 1-D array or numpy 2-D array (for multi-class task)
The predicted values.
Predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task.
train_data : Dataset
The training dataset.
grad : numpy 1-D array or numpy 2-D array (for multi-class task)
The value of the first order derivative (gradient) of the loss
with respect to the elements of preds for each sample point.
hess : numpy 1-D array or numpy 2-D array (for multi-class task)
The value of the second order derivative (Hessian) of the loss
with respect to the elements of preds for each sample point.
For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes],
and grad and hess should be returned in the same format.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
# need to reset training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
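# Illustrative usage sketch: one boosting round with a custom objective. The squared-error
# gradient/Hessian below is an assumption for the example, not something this module defines.
#
#     def l2_objective(preds, train_data):
#         grad = preds - train_data.get_label()
#         hess = np.ones_like(preds)
#         return grad, hess
#
#     bst.update(fobj=l2_objective)   # sets objective to "none" and boosts one iteration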
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
Score is returned before any transformation,
e.g. it is raw margin instead of probability of positive class for binary task.
For multi-class task, score are numpy 2-D array of shape = [n_samples, n_classes],
and grad and hess should be returned in the same format.
Parameters
----------
grad : numpy 1-D array or numpy 2-D array (for multi-class task)
The value of the first order derivative (gradient) of the loss
with respect to the elements of score for each sample point.
hess : numpy 1-D array or numpy 2-D array (for multi-class task)
The value of the second order derivative (Hessian) of the loss
with respect to the elements of score for each sample point.
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
if self.__num_class > 1:
grad = grad.ravel(order='F')
hess = hess.ravel(order='F')
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError(f"Lengths of gradient ({len(grad)}) and Hessian ({len(hess)}) don't match")
num_train_data = self.train_set.num_data()
if len(grad) != num_train_data * self.__num_class:
raise ValueError(
f"Lengths of gradient ({len(grad)}) and Hessian ({len(hess)}) "
f"don't match training data length ({num_train_data}) * "
f"number of models per one iteration ({self.__num_class})"
)
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : str
Name of the data.
feval : callable, list of callable, or None, optional (default=None)
Customized evaluation function.
Each evaluation function should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : numpy 1-D array or numpy 2-D array (for multi-class task)
The predicted values.
For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes].
If custom objective function is used, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
A ``Dataset`` to evaluate.
eval_name : str
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable, list of callable, or None, optional (default=None)
Customized evaluation function.
Each evaluation function should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : numpy 1-D array or numpy 2-D array (for multi-class task)
The predicted values.
For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes].
If custom objective function is used, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The training dataset.
eval_name : str
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable, list of callable, or None, optional (default=None)
Customized evaluation function.
Each evaluation function should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : numpy 1-D array or numpy 2-D array (for multi-class task)
The predicted values.
For multi-class task, preds are numpy 2-D array of shape = [n_samples, n_classes].
If custom objective function is used, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The validation dataset.
eval_name : str
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : str or pathlib.Path
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : str, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(str(filename))))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
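# Illustrative usage sketch: save the model to a text file and load it back later;
# 'model.txt' is an assumed path.
#
#     bst.save_model('model.txt', num_iteration=bst.best_iteration)
#     bst2 = lgb.Booster(model_file='model.txt')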
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str):
"""Load Booster from a string.
Parameters
----------
model_str : str
Model will be loaded from this string.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : str, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : str
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split', object_hook=None):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : str, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
object_hook : callable or None, optional (default=None)
If not None, ``object_hook`` is a function called while parsing the json
string returned by the C API. It may be used to alter the json, to store
specific values while building the json structure. It avoids
walking through the structure again. It saves a significant amount
of time if the number of trees is huge.
Signature is ``def object_hook(node: dict) -> dict``.
None is equivalent to ``lambda node: node``.
See documentation of ``json.loads()`` for further details.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'), object_hook=object_hook)
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, **kwargs):
"""Make a prediction.
Parameters
----------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM).
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is str.
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header)
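# Illustrative usage sketch: `X_new` is an assumed feature matrix with the same columns as
# the training data.
#
#     preds = bst.predict(X_new)                        # transformed scores
#     raw = bst.predict(X_new, raw_score=True)          # raw margins
#     contribs = bst.predict(X_new, pred_contrib=True)  # per-feature contributions + expected-value column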
def refit(
self,
data,
label,
decay_rate=0.9,
reference=None,
weight=None,
group=None,
init_score=None,
feature_name='auto',
categorical_feature='auto',
dataset_params=None,
free_raw_data=True,
**kwargs
):
"""Refit the existing Booster by new data.
Parameters
----------
data : str, pathlib.Path, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If str or pathlib.Path, it represents the path to a text file (CSV, TSV, or LibSVM).
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
reference : Dataset or None, optional (default=None)
Reference for ``data``.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each ``data`` instance. Weights should be non-negative.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for ``data``.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
Init score for ``data``.
feature_name : list of str, or 'auto', optional (default="auto")
Feature names for ``data``.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of str or int, or 'auto', optional (default="auto")
Categorical features for ``data``.
If list of int, interpreted as indices.
If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features will be cast to int32 and thus should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
Floating point numbers in categorical features will be rounded towards 0.
dataset_params : dict or None, optional (default=None)
Other parameters for Dataset ``data``.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset for ``data``.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
if dataset_params is None:
dataset_params = {}
predictor = self._to_predictor(deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
out_is_linear = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetLinear(
self.handle,
ctypes.byref(out_is_linear)))
new_params = _choose_param_value(
main_param_name="linear_tree",
params=self.params,
default_value=None
)
new_params["linear_tree"] = bool(out_is_linear.value)
new_params.update(dataset_params)
train_set = Dataset(
data=data,
label=label,
reference=reference,
weight=weight,
group=group,
init_score=init_score,
feature_name=feature_name,
categorical_feature=categorical_feature,
params=new_params,
free_raw_data=free_raw_data,
)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, _, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
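# Illustrative usage sketch: adapt an existing model to new data while keeping the tree
# structure; `X_new` and `y_new` are assumed arrays.
#
#     bst_refit = bst.refit(X_new, y_new, decay_rate=0.9)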
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list of str
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : str, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.empty(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == C_API_FEATURE_IMPORTANCE_SPLIT:
return result.astype(np.int32)
else:
return result
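# Illustrative usage sketch: pair importances with feature names for readability.
#
#     dict(zip(bst.feature_name(), bst.feature_importance(importance_type='gain')))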
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or str
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If str, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, str or None, optional (default=None)
The maximum number of bins.
If None, or if it is an int greater than the number of unique split values and ``xgboost_style=True``,
the number of bins equals the number of unique split values.
If str, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, str):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], str):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, int) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return pd_DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
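# Illustrative usage sketch: 'Column_0' is an assumed (non-categorical) feature name.
#
#     hist, bin_edges = bst.get_split_value_histogram('Column_0', bins=20)
#     bst.get_split_value_histogram('Column_0', xgboost_style=True)  # DataFrame if pandas is installed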
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.empty(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if callable(feval):
feval = [feval]
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
for eval_function in feval:
if eval_function is None:
continue
feval_ret = eval_function(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.empty(n_preds, dtype=np.float64)
# avoid predicting multiple times within one iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError(f"Wrong length of predict results for data {data_idx}")
self.__is_predicted_cur_iter[data_idx] = True
result = self.__inner_predict_buffer[data_idx]
if self.__num_class > 1:
num_data = result.size // self.__num_class
result = result.reshape(num_data, self.__num_class, order='F')
return result
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of eval metrics
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [
ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
self.__name_inner_eval = [
string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval)
]
self.__higher_better_inner_eval = [
name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval
]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The name of the attribute.
Returns
-------
value : str or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, str):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
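    # Illustrative usage (a sketch; `booster` is a hypothetical trained Booster):
    #   booster.set_attr(best_iteration='42')      # values must be strings
    #   assert booster.attr('best_iteration') == '42'
    #   booster.set_attr(best_iteration=None)      # passing None deletes the attribute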
| 42.107546
| 166
| 0.593319
|
33dc77f501bf541544a266a888e94938694fac54
| 1,979
|
py
|
Python
|
setup.py
|
MeliorAI/meliorTransformers
|
b2936e1aac23e63e0b737d03975124c31a960812
|
[
"Apache-2.0"
] | 1
|
2020-08-06T10:48:49.000Z
|
2020-08-06T10:48:49.000Z
|
setup.py
|
MeliorAI/meliorTransformers
|
b2936e1aac23e63e0b737d03975124c31a960812
|
[
"Apache-2.0"
] | 2
|
2020-02-13T12:45:57.000Z
|
2020-04-14T11:30:33.000Z
|
setup.py
|
MeliorAI/meliorTransformers
|
b2936e1aac23e63e0b737d03975124c31a960812
|
[
"Apache-2.0"
] | 2
|
2020-07-21T12:43:51.000Z
|
2021-08-13T15:21:22.000Z
|
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
with open("melior_transformers/version.py") as f:
exec(f.read())
# Get the long description from the README file
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
upload_requires = [
"twine==3.1.1",
]
install_requires = [
"torch==1.3.1",
"tqdm==4.43.0",
"transformers==2.3.0",
"numpy==1.16.3",
"pandas==0.25.3",
"seqeval==0.0.12",
"scipy==1.4.1",
# "apex==0.9.10dev",
"scikit-learn~=0.20.2",
"tensorboardX==2.0",
"wandb==0.8.21",
"requests",
"regex",
"wandb",
"coloredlogs",
"sentence-transformers==0.2.5",
]
tests_requires = [
# test
"pytest-cov==2.7.1",
"pytest-localserver==0.5.0",
"pytest==5.1.3",
# lint/format/types
"black==19.10b0",
"flake8==3.7.8",
"pytype==2019.7.11",
"isort==4.3.21",
"pre-commit==1.21.0",
]
extras_requires = {
"test": tests_requires,
"dev": tests_requires + upload_requires,
"upload": upload_requires,
}
setup(
name="melior_transformers",
version=__version__,
author="MeliorAI",
author_email="flavius@melior.ai",
description="An easy-to-use wrapper library for the Transformers library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MeliorAI/meliorTransformers/",
packages=find_packages(),
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires=">=3.6",
install_requires=install_requires,
tests_require=tests_requires,
extras_require=extras_requires,
)
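# Illustrative install commands for the extras declared above (a sketch; assumes the
# package is installed from a local checkout of this repository):
#   pip install .            # runtime dependencies only
#   pip install ".[test]"    # adds the pytest/lint tooling
#   pip install ".[dev]"     # test tooling plus twine for uploading releases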
| 25.050633
| 79
| 0.641738
|
767ec74a98cb96a68f6952b1b6157f3c72b44c15
| 4,064
|
py
|
Python
|
alipay/aop/api/request/AlipayInsScenePetprofilePlatformprofileQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayInsScenePetprofilePlatformprofileQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayInsScenePetprofilePlatformprofileQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayInsScenePetprofilePlatformprofileQueryModel import AlipayInsScenePetprofilePlatformprofileQueryModel
class AlipayInsScenePetprofilePlatformprofileQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayInsScenePetprofilePlatformprofileQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayInsScenePetprofilePlatformprofileQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ins.scene.petprofile.platformprofile.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
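# Illustrative usage (a sketch of hypothetical client code, not part of the SDK file):
#   model = AlipayInsScenePetprofilePlatformprofileQueryModel()
#   request = AlipayInsScenePetprofilePlatformprofileQueryRequest(biz_model=model)
#   request.notify_url = 'https://example.com/notify'
#   params = request.get_params()  # dict ready to be signed and sent by an Alipay client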
| 28.027586
| 148
| 0.652559
|
c594403c878d4e3f17961340ba05bfa94d6a1a6f
| 2,186
|
py
|
Python
|
rllib/models/tests/test_convtranspose2d_stack.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
rllib/models/tests/test_convtranspose2d_stack.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
rllib/models/tests/test_convtranspose2d_stack.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
import gym
import numpy as np
import os
from pathlib import Path
import unittest
from ray.rllib.models.preprocessors import GenericPixelPreprocessor
from ray.rllib.models.torch.modules.convtranspose2d_stack import \
ConvTranspose2DStack
from ray.rllib.utils.framework import try_import_torch, try_import_tf
from ray.rllib.utils.images import imread
torch, nn = try_import_torch()
tf1, tf, tfv = try_import_tf()
class TestConvTranspose2DStack(unittest.TestCase):
"""Tests our ConvTranspose2D Stack modules/layers."""
def test_convtranspose2d_stack(self):
"""Tests, whether the conv2d stack can be trained to predict an image.
"""
batch_size = 128
input_size = 1
module = ConvTranspose2DStack(input_size=input_size)
preprocessor = GenericPixelPreprocessor(
gym.spaces.Box(0, 255, (64, 64, 3), np.uint8), options={"dim": 64})
optim = torch.optim.Adam(module.parameters(), lr=0.0001)
rllib_dir = Path(__file__).parent.parent.parent
img_file = os.path.join(rllib_dir,
"tests/data/images/obstacle_tower.png")
img = imread(img_file)
# Preprocess.
img = preprocessor.transform(img)
# Make channels first.
img = np.transpose(img, (2, 0, 1))
# Add batch rank and repeat.
imgs = np.reshape(img, (1, ) + img.shape)
imgs = np.repeat(imgs, batch_size, axis=0)
# Move to torch.
imgs = torch.from_numpy(imgs)
init_loss = loss = None
for _ in range(10):
# Random inputs.
inputs = torch.from_numpy(
np.random.normal(0.0, 1.0, (batch_size, input_size))).float()
distribution = module(inputs)
# Construct a loss.
loss = -torch.mean(distribution.log_prob(imgs))
if init_loss is None:
init_loss = loss
print("loss={}".format(loss))
# Minimize loss.
loss.backward()
optim.step()
self.assertLess(loss, init_loss)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 33.630769
| 79
| 0.622598
|
b539ca8103a79a87d207ff4fc990e433815cc961
| 1,657
|
py
|
Python
|
triton/dns/dnssec/digest/base.py
|
jayvdb/triton
|
b64424c193b131721f172f94963d8d79b21804db
|
[
"MIT"
] | null | null | null |
triton/dns/dnssec/digest/base.py
|
jayvdb/triton
|
b64424c193b131721f172f94963d8d79b21804db
|
[
"MIT"
] | 1
|
2020-10-16T00:57:07.000Z
|
2020-10-27T13:02:24.000Z
|
triton/dns/dnssec/digest/base.py
|
jayvdb/triton
|
b64424c193b131721f172f94963d8d79b21804db
|
[
"MIT"
] | 1
|
2020-08-27T13:59:08.000Z
|
2020-08-27T13:59:08.000Z
|
from bitstring import BitArray
class Digest:
id: int
@classmethod
def hash_key(cls, key_resource_record):
"""
Hashes DNSKEY resource record (name + rdata)
:param key_resource_record:
:return:
"""
from triton.dns.message.domains.domain import Domain
h = cls.hasher.new()
h.update(
BitArray(bin=Domain.sub_encode(key_resource_record.name) + key_resource_record.rdata.Binary.full).bytes)
return h.digest()
@classmethod
def by_id(cls, id):
"""
Finds Digest type by its id
:param id:
:return:
"""
for subclass in Digest.__subclasses__():
if subclass.id == id:
return subclass
else:
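            # Reached only when the loop above finds no subclass with a matching id
            # (this is Python's for/else construct).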
raise Exception('Unknown digest id')
@classmethod
def verify_key_rr(cls, key_resource_record, ds_resource_record):
"""
Verifies that DNSKEY resource record matches DS resource record on parent
:param key_resource_record:
:param ds_resource_record:
:return:
"""
assert key_resource_record.rdata.key_tag == ds_resource_record.rdata.key_tag, 'Key tags mismatch'
hashed_key = cls.hash_key(key_resource_record)
if ds_resource_record.rdata._digest == hashed_key:
return True
return False
@classmethod
def verify_from_ds(cls, key_resource_record, ds_resource_record):
return Digest.by_id(ds_resource_record.rdata._digest_type).verify_key_rr(key_resource_record,
ds_resource_record)
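# A hypothetical concrete digest, added purely to illustrate how by_id() discovers
# implementations via __subclasses__(); the id value and hasher choice are assumptions,
# not part of the original module:
#   from Crypto.Hash import SHA256
#   class SHA256Digest(Digest):
#       id = 2            # DS digest type 2 = SHA-256 (assumed mapping)
#       hasher = SHA256   # anything exposing .new() works with hash_key() above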
| 31.264151
| 116
| 0.609535
|
bdcdf07e86ea51c53e5c2aca0f04ba96f07c07b8
| 4,416
|
py
|
Python
|
oneflow/compatible_single_client_python/test/ops/test_sparse_cross_entropy_ms.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_sparse_cross_entropy_ms.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_sparse_cross_entropy_ms.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import tensorflow as tf
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
from collections import OrderedDict
from test_util import GenArgList
import test_global_storage
from test_util import type_name_to_flow_type
from test_util import type_name_to_np_type
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(
device_type, data_type, label_type, num_classes, batch_size
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.gpu_device_num(0)
flow.config.cpu_device_num(4)
else:
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(type="train", function_config=func_config)
def SparseSoftmaxCrossEntropyWithLogitsJob(
labels: oft.Numpy.Placeholder(
(batch_size,), dtype=type_name_to_flow_type[label_type]
)
):
with flow.scope.placement(device_type, "0:0"):
x = flow.get_variable(
"x",
shape=(batch_size, num_classes),
dtype=type_name_to_flow_type[data_type],
initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
trainable=True,
)
prediction = flow.nn.softmax(logits=x)
with flow.scope.placement(device_type, "0:0-3"):
            labels_distribute = flow.distribute.broadcast()
prediction_distribute = flow.distribute.split(len(prediction.shape) - 1)
loss = flow.nn.sparse_cross_entropy(
                labels=labels.with_distribute(labels_distribute),
prediction=prediction.with_distribute(prediction_distribute),
)
with flow.scope.placement(device_type, "0:0"):
loss = flow.math.square(loss)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch(loss, test_global_storage.Setter("loss"))
flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
return loss
# fake labels
labels = np.random.randint(0, num_classes, size=(batch_size,)).astype(
type_name_to_np_type[label_type]
)
# OneFlow
of_out = SparseSoftmaxCrossEntropyWithLogitsJob(labels).get()
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(test_global_storage.Get("x"))
tf_out = tf.nn.sparse_softmax_cross_entropy_with_logits(labels, x)
tf_out = tf.math.square(tf_out)
loss_diff = test_global_storage.Get("loss_diff")
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-5, atol=1e-5)
assert np.allclose(
test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5
)
flow.clear_default_session()
@flow.unittest.skip_unless_1n4d()
class TestSparseCrossEntropyMs(flow.unittest.TestCase):
def test_sparse_cross_entropy_with_logits(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["label_type"] = ["int32", "int64"]
arg_dict["num_classes"] = [1000]
arg_dict["batch_size"] = [64]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
| 37.109244
| 84
| 0.688179
|
a413a247c967a7f9df28f97ce107f7eb8a144751
| 1,835
|
py
|
Python
|
route/recent_discuss.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
route/recent_discuss.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
route/recent_discuss.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
from .tool.func import *
def recent_discuss_2(conn):
curs = conn.cursor()
div = ''
if flask.request.args.get('what', 'normal') == 'normal':
div += '<a href="/recent_discuss?what=close">(' + load_lang('close_discussion') + ')</a>'
m_sub = 0
else:
div += '<a href="/recent_discuss">(' + load_lang('open_discussion') + ')</a>'
m_sub = ' (' + load_lang('closed') + ')'
div += '''
<hr class=\"main_hr\">
<table id="main_table_set">
<tbody>
<tr>
<td id="main_table_width_half">''' + load_lang('discussion_name') + '''</td>
<td id="main_table_width_half">''' + load_lang('time') + '''</td>
</tr>
'''
if m_sub == 0:
curs.execute(db_change("select title, sub, date from rd where not stop = 'O' order by date desc limit 50"))
else:
curs.execute(db_change("select title, sub, date from rd where stop = 'O' order by date desc limit 50"))
for data in curs.fetchall():
curs.execute(db_change("select code from topic where id = '1' and title = ? and sub = ?"), [data[0], data[1]])
get_code = curs.fetchall()
if get_code and get_code[0][0] != '':
get_code = get_code[0][0]
else:
get_code = '1'
title = html.escape(data[0])
sub = html.escape(data[1])
div += '<tr><td><a href="/thread/' + get_code + '">' + title + '</a> (' + sub + ')</td><td>' + data[2] + '</td></tr>'
div += '</tbody></table>'
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('recent_discussion'), wiki_set(), custom(), other2([m_sub, 0])],
data = div,
menu = 0
))
| 35.980392
| 125
| 0.497548
|
50b518b937d6f8f3a1c2b92b1e72bf5af31f2c21
| 31,575
|
py
|
Python
|
model_zoo/research/cv/tinynet/src/tinynet.py
|
xu-weizhen/mindspore
|
e55642e40b8ce9abafa8e50865b490f0317b4703
|
[
"Apache-2.0"
] | 55
|
2020-12-17T10:26:06.000Z
|
2022-03-28T07:18:26.000Z
|
model_zoo/research/cv/tinynet/src/tinynet.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/research/cv/tinynet/src/tinynet.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 14
|
2021-01-29T02:39:47.000Z
|
2022-03-23T05:00:26.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tinynet model definition"""
import math
import re
from copy import deepcopy
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.common.initializer import Normal, Zero, One, initializer, Uniform
from mindspore import context, ms_function
from mindspore.common.parameter import Parameter
from mindspore import Tensor
# Imagenet constant values
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
# model structure configurations for TinyNets, values are
# (resolution multiplier, channel multiplier, depth multiplier)
# codes are inspired and partially adapted from
# https://github.com/rwightman/gen-efficientnet-pytorch
TINYNET_CFG = {"a": (0.86, 1.0, 1.2),
"b": (0.84, 0.75, 1.1),
"c": (0.825, 0.54, 0.85),
"d": (0.68, 0.54, 0.695),
"e": (0.475, 0.51, 0.60)}
relu = P.ReLU()
sigmoid = P.Sigmoid()
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = {
'efficientnet_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0-d6904d92.pth'),
'efficientnet_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2-cf78dc4d.pth',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'efficientnet_b3': _cfg(
url='', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_b4': _cfg(
url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
}
_DEBUG = False
# Default args for PyTorch BN impl
_BN_MOMENTUM_PT_DEFAULT = 0.1
_BN_EPS_PT_DEFAULT = 1e-5
_BN_ARGS_PT = dict(momentum=_BN_MOMENTUM_PT_DEFAULT, eps=_BN_EPS_PT_DEFAULT)
# Defaults used for Google/Tensorflow training of mobile networks /w
# RMSprop as per papers and TF reference implementations. PT momentum
# equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
_BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
_BN_EPS_TF_DEFAULT = 1e-3
_BN_ARGS_TF = dict(momentum=_BN_MOMENTUM_TF_DEFAULT, eps=_BN_EPS_TF_DEFAULT)
def _initialize_weight_goog(shape=None, layer_type='conv', bias=False):
"""Google style weight initialization"""
if layer_type not in ('conv', 'bn', 'fc'):
raise ValueError(
'The layer type is not known, the supported are conv, bn and fc')
if bias:
return Zero()
if layer_type == 'conv':
assert isinstance(shape, (tuple, list)) and len(
shape) == 3, 'The shape must be 3 scalars, and are in_chs, ks, out_chs respectively'
n = shape[1] * shape[1] * shape[2]
return Normal(math.sqrt(2.0 / n))
if layer_type == 'bn':
return One()
assert isinstance(shape, (tuple, list)) and len(
shape) == 2, 'The shape must be 2 scalars, and are in_chs, out_chs respectively'
n = shape[1]
init_range = 1.0 / math.sqrt(n)
return Uniform(init_range)
def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0,
pad_mode='same', bias=False):
"""convolution wrapper"""
weight_init_value = _initialize_weight_goog(
shape=(in_channels, kernel_size, out_channels))
bias_init_value = _initialize_weight_goog(bias=True) if bias else None
if bias:
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
has_bias=bias, bias_init=bias_init_value)
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
has_bias=bias)
def _conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='same', bias=False):
"""1x1 convolution wrapper"""
weight_init_value = _initialize_weight_goog(
shape=(in_channels, 1, out_channels))
bias_init_value = _initialize_weight_goog(bias=True) if bias else None
if bias:
return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
has_bias=bias, bias_init=bias_init_value)
return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
has_bias=bias)
def _conv_group(in_channels, out_channels, group, kernel_size=3, stride=1, padding=0,
pad_mode='same', bias=False):
"""group convolution wrapper"""
weight_init_value = _initialize_weight_goog(
shape=(in_channels, kernel_size, out_channels))
bias_init_value = _initialize_weight_goog(bias=True) if bias else None
if bias:
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
group=group, has_bias=bias, bias_init=bias_init_value)
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, pad_mode=pad_mode, weight_init=weight_init_value,
group=group, has_bias=bias)
def _fused_bn(channels, momentum=0.1, eps=1e-4, gamma_init=1, beta_init=0):
return nn.BatchNorm2d(channels, eps=eps, momentum=1-momentum, gamma_init=gamma_init,
beta_init=beta_init)
def _dense(in_channels, output_channels, bias=True, activation=None):
weight_init_value = _initialize_weight_goog(shape=(in_channels, output_channels),
layer_type='fc')
bias_init_value = _initialize_weight_goog(bias=True) if bias else None
if bias:
return nn.Dense(in_channels, output_channels, weight_init=weight_init_value,
bias_init=bias_init_value, has_bias=bias, activation=activation)
return nn.Dense(in_channels, output_channels, weight_init=weight_init_value,
has_bias=bias, activation=activation)
def _resolve_bn_args(kwargs):
bn_args = _BN_ARGS_TF.copy() if kwargs.pop(
'bn_tf', False) else _BN_ARGS_PT.copy()
bn_momentum = kwargs.pop('bn_momentum', None)
if bn_momentum is not None:
bn_args['momentum'] = bn_momentum
bn_eps = kwargs.pop('bn_eps', None)
if bn_eps is not None:
bn_args['eps'] = bn_eps
return bn_args
def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
channels *= multiplier
channel_min = channel_min or divisor
new_channels = max(
int(channels + divisor / 2) // divisor * divisor,
channel_min)
# Make sure that round down does not go down by more than 10%.
if new_channels < 0.9 * channels:
new_channels += divisor
return new_channels
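# Worked example (added for illustration): _round_channels(40, multiplier=0.75) gives
# 40 * 0.75 = 30 -> int(30 + 8 / 2) // 8 * 8 = 32; since 32 >= 0.9 * 30, 32 is returned.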
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str, depth_multiplier=1.0):
""" Decode block definition string
Gets a list of block arg (dicts) through a string notation of arguments.
E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
All args can exist in any order with the exception of the leading string which
is assumed to indicate the block type.
leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
r - number of repeat blocks,
k - kernel size,
s - strides (1-9),
e - expansion ratio,
c - output channels,
se - squeeze/excitation ratio
n - activation fn ('re', 'r6', 'hs', or 'sw')
Args:
block_str: a string representation of block arguments.
Returns:
A list of block args (dicts)
Raises:
      ValueError: if the string definition is not properly specified (TODO)
"""
assert isinstance(block_str, str)
ops = block_str.split('_')
block_type = ops[0] # take the block type off the front
ops = ops[1:]
options = {}
noskip = False
for op in ops:
if op == 'noskip':
noskip = True
elif op.startswith('n'):
# activation fn
key = op[0]
v = op[1:]
if v == 're':
print('not support')
elif v == 'r6':
print('not support')
elif v == 'hs':
print('not support')
elif v == 'sw':
print('not support')
else:
continue
            options[key] = v  # keep the activation token so it can be read back below
else:
# all numeric options
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
act_fn = options['n'] if 'n' in options else None
exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
fake_in_chs = int(options['fc']) if 'fc' in options else 0
num_repeat = int(options['r'])
# each type of block has different valid arguments, fill accordingly
if block_type == 'ir':
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_fn=act_fn,
noskip=noskip,
)
elif block_type in ('ds', 'dsa'):
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_fn=act_fn,
pw_act=block_type == 'dsa',
noskip=block_type == 'dsa' or noskip,
)
elif block_type == 'er':
block_args = dict(
block_type=block_type,
exp_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
fake_in_chs=fake_in_chs,
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_fn=act_fn,
noskip=noskip,
)
elif block_type == 'cn':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
stride=int(options['s']),
act_fn=act_fn,
)
else:
assert False, 'Unknown block type (%s)' % block_type
return block_args, num_repeat
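# Worked example (added for illustration): _decode_block_str('ir_r2_k3_s2_e6_c24_se0.25')
# returns num_repeat=2 and block args with block_type='ir', dw_kernel_size=3, stride=2,
# exp_ratio=6.0, out_chs=24, se_ratio=0.25, exp_kernel_size=1 and pw_kernel_size=1.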
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
""" Per-stage depth scaling
Scales the block repeats in each stage. This depth scaling impl maintains
compatibility with the EfficientNet scaling method, while allowing sensible
scaling for other models that may have multiple block arg definitions in each stage.
"""
# We scale the total repeat count for each stage, there may be multiple
# block arg defs per stage so we need to sum.
num_repeat = sum(repeats)
if depth_trunc == 'round':
# Truncating to int by rounding allows stages with few repeats to remain
# proportionally smaller for longer. This is a good choice when stage definitions
# include single repeat stages that we'd prefer to keep that way as long as possible
num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
else:
# The default for EfficientNet truncates repeats to int via 'ceil'.
# Any multiplier > 1.0 will result in an increased depth for every stage.
num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
# Proportionally distribute repeat count scaling to each block definition in the stage.
# Allocation is done in reverse as it results in the first block being less likely to be scaled.
# The first block makes less sense to repeat in most of the arch definitions.
repeats_scaled = []
for r in repeats[::-1]:
rs = max(1, round((r / num_repeat * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::-1]
# Apply the calculated scaling to each block arg in the stage
sa_scaled = []
for ba, rep in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled
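# Worked example (added for illustration): repeats=[2, 3] with depth_multiplier=1.2 and
# the default 'ceil' truncation gives num_repeat_scaled = ceil(5 * 1.2) = 6; distributing
# in reverse assigns 4 repeats to the second block def and 2 to the first, i.e. [2, 4].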
def _decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil'):
"""further decode the architecture definition into model-ready format"""
arch_args = []
for _, block_strings in enumerate(arch_def):
assert isinstance(block_strings, list)
stack_args = []
repeats = []
for block_str in block_strings:
assert isinstance(block_str, str)
ba, rep = _decode_block_str(block_str)
stack_args.append(ba)
repeats.append(rep)
arch_args.append(_scale_stage_depth(
stack_args, repeats, depth_multiplier, depth_trunc))
return arch_args
class Swish(nn.Cell):
"""swish activation function"""
def __init__(self):
super(Swish, self).__init__()
self.sigmoid = P.Sigmoid()
def construct(self, x):
return x * self.sigmoid(x)
@ms_function
def swish(x):
return x * nn.Sigmoid()(x)
class BlockBuilder(nn.Cell):
"""build efficient-net convolution blocks"""
def __init__(self, builder_in_channels, builder_block_args, channel_multiplier=1.0,
channel_divisor=8, channel_min=None, pad_type='', act_fn=None,
se_gate_fn=sigmoid, se_reduce_mid=False, bn_args=None,
drop_connect_rate=0., verbose=False):
super(BlockBuilder, self).__init__()
self.channel_multiplier = channel_multiplier
self.channel_divisor = channel_divisor
self.channel_min = channel_min
self.pad_type = pad_type
self.act_fn = Swish()
self.se_gate_fn = se_gate_fn
self.se_reduce_mid = se_reduce_mid
self.bn_args = bn_args
self.drop_connect_rate = drop_connect_rate
self.verbose = verbose
# updated during build
self.in_chs = None
self.block_idx = 0
self.block_count = 0
self.layer = self._make_layer(builder_in_channels, builder_block_args)
def _round_channels(self, chs):
return _round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
def _make_block(self, ba):
"""make the current block based on the block argument"""
bt = ba.pop('block_type')
ba['in_chs'] = self.in_chs
ba['out_chs'] = self._round_channels(ba['out_chs'])
if 'fake_in_chs' in ba and ba['fake_in_chs']:
            # this is a hack to work around an input-filter mismatch in the original impl
ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
ba['bn_args'] = self.bn_args
ba['pad_type'] = self.pad_type
# block act fn overrides the model default
ba['act_fn'] = ba['act_fn'] if ba['act_fn'] is not None else self.act_fn
assert ba['act_fn'] is not None
if bt == 'ir':
ba['drop_connect_rate'] = self.drop_connect_rate * \
self.block_idx / self.block_count
ba['se_gate_fn'] = self.se_gate_fn
ba['se_reduce_mid'] = self.se_reduce_mid
block = InvertedResidual(**ba)
elif bt in ('ds', 'dsa'):
ba['drop_connect_rate'] = self.drop_connect_rate * \
self.block_idx / self.block_count
block = DepthwiseSeparableConv(**ba)
else:
            assert False, 'Unknown block type (%s) while building model.' % bt
self.in_chs = ba['out_chs']
return block
def _make_stack(self, stack_args):
"""make a stack of blocks"""
blocks = []
# each stack (stage) contains a list of block arguments
for i, ba in enumerate(stack_args):
if i >= 1:
# only the first block in any stack can have a stride > 1
ba['stride'] = 1
block = self._make_block(ba)
blocks.append(block)
self.block_idx += 1 # incr global idx (across all stacks)
return nn.SequentialCell(blocks)
def _make_layer(self, in_chs, block_args):
""" Build the entire layer
Args:
in_chs: Number of input-channels passed to first block
block_args: A list of lists, outer list defines stages, inner
list contains strings defining block configuration(s)
Return:
List of block stacks (each stack wrapped in nn.Sequential)
"""
self.in_chs = in_chs
self.block_count = sum([len(x) for x in block_args])
self.block_idx = 0
blocks = []
# outer list of block_args defines the stacks ('stages' by some conventions)
for _, stack in enumerate(block_args):
assert isinstance(stack, list)
stack = self._make_stack(stack)
blocks.append(stack)
return nn.SequentialCell(blocks)
def construct(self, x):
return self.layer(x)
class DepthWiseConv(nn.Cell):
"""depth-wise convolution"""
def __init__(self, in_planes, kernel_size, stride):
super(DepthWiseConv, self).__init__()
platform = context.get_context("device_target")
weight_shape = [1, kernel_size, in_planes]
weight_init = _initialize_weight_goog(shape=weight_shape)
if platform == "GPU":
self.depthwise_conv = P.Conv2D(out_channel=in_planes*1,
kernel_size=kernel_size,
stride=stride,
pad=int(kernel_size/2),
pad_mode="pad",
group=in_planes)
self.weight = Parameter(initializer(weight_init,
[in_planes*1, 1, kernel_size, kernel_size]))
else:
self.depthwise_conv = P.DepthwiseConv2dNative(channel_multiplier=1,
kernel_size=kernel_size,
stride=stride, pad_mode='pad',
pad=int(kernel_size/2))
self.weight = Parameter(initializer(weight_init,
[1, in_planes, kernel_size, kernel_size]))
def construct(self, x):
x = self.depthwise_conv(x, self.weight)
return x
class DropConnect(nn.Cell):
"""drop connect implementation"""
def __init__(self, drop_connect_rate=0., seed0=0, seed1=0):
super(DropConnect, self).__init__()
self.shape = P.Shape()
self.dtype = P.DType()
self.keep_prob = 1 - drop_connect_rate
self.dropout = P.Dropout(keep_prob=self.keep_prob)
self.keep_prob_tensor = Tensor(self.keep_prob, dtype=mstype.float32)
def construct(self, x):
shape = self.shape(x)
dtype = self.dtype(x)
ones_tensor = P.Fill()(dtype, (shape[0], 1, 1, 1), 1)
_, mask = self.dropout(ones_tensor)
x = x * mask
x = x / self.keep_prob_tensor
return x
def drop_connect(inputs, training=False, drop_connect_rate=0.):
if not training:
return inputs
return DropConnect(drop_connect_rate)(inputs)
class SqueezeExcite(nn.Cell):
"""squeeze-excite implementation"""
def __init__(self, in_chs, reduce_chs=None, act_fn=relu, gate_fn=sigmoid):
super(SqueezeExcite, self).__init__()
self.act_fn = Swish()
self.gate_fn = gate_fn
reduce_chs = reduce_chs or in_chs
self.conv_reduce = nn.Conv2d(in_channels=in_chs, out_channels=reduce_chs,
kernel_size=1, has_bias=True, pad_mode='pad')
self.conv_expand = nn.Conv2d(in_channels=reduce_chs, out_channels=in_chs,
kernel_size=1, has_bias=True, pad_mode='pad')
self.avg_global_pool = P.ReduceMean(keep_dims=True)
def construct(self, x):
x_se = self.avg_global_pool(x, (2, 3))
x_se = self.conv_reduce(x_se)
x_se = self.act_fn(x_se)
x_se = self.conv_expand(x_se)
x_se = self.gate_fn(x_se)
x = x * x_se
return x
class DepthwiseSeparableConv(nn.Cell):
"""depth-wise convolution -> (squeeze-excite) -> point-wise convolution"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, pad_type='', act_fn=relu, noskip=False,
pw_kernel_size=1, pw_act=False, se_ratio=0., se_gate_fn=sigmoid,
bn_args=None, drop_connect_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
assert stride in [1, 2], 'stride must be 1 or 2'
self.has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act
self.act_fn = Swish()
self.drop_connect_rate = drop_connect_rate
self.conv_dw = DepthWiseConv(in_chs, dw_kernel_size, stride)
self.bn1 = _fused_bn(in_chs, **bn_args)
if self.has_se:
self.se = SqueezeExcite(in_chs, reduce_chs=max(1, int(in_chs * se_ratio)),
act_fn=act_fn, gate_fn=se_gate_fn)
self.conv_pw = _conv1x1(in_chs, out_chs)
self.bn2 = _fused_bn(out_chs, **bn_args)
def construct(self, x):
"""forward the depthwise separable conv"""
identity = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act_fn(x)
if self.has_se:
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
if self.has_pw_act:
x = self.act_fn(x)
if self.has_residual:
if self.drop_connect_rate > 0.:
x = drop_connect(x, self.training, self.drop_connect_rate)
x = x + identity
return x
class InvertedResidual(nn.Cell):
"""inverted-residual block implementation"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1,
pad_type='', act_fn=relu, pw_kernel_size=1,
noskip=False, exp_ratio=1., exp_kernel_size=1, se_ratio=0.,
se_reduce_mid=False, se_gate_fn=sigmoid, shuffle_type=None,
bn_args=None, drop_connect_rate=0.):
super(InvertedResidual, self).__init__()
mid_chs = int(in_chs * exp_ratio)
self.has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.act_fn = Swish()
self.drop_connect_rate = drop_connect_rate
self.conv_pw = _conv(in_chs, mid_chs, exp_kernel_size)
self.bn1 = _fused_bn(mid_chs, **bn_args)
self.shuffle_type = shuffle_type
if self.shuffle_type is not None and isinstance(exp_kernel_size, list):
self.shuffle = None
self.conv_dw = DepthWiseConv(mid_chs, dw_kernel_size, stride)
self.bn2 = _fused_bn(mid_chs, **bn_args)
if self.has_se:
se_base_chs = mid_chs if se_reduce_mid else in_chs
self.se = SqueezeExcite(
mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)),
act_fn=act_fn, gate_fn=se_gate_fn
)
self.conv_pwl = _conv(mid_chs, out_chs, pw_kernel_size)
self.bn3 = _fused_bn(out_chs, **bn_args)
def construct(self, x):
"""forward the inverted-residual block"""
identity = x
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act_fn(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act_fn(x)
if self.has_se:
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
if self.drop_connect_rate > 0:
x = drop_connect(x, self.training, self.drop_connect_rate)
x = x + identity
return x
class GenEfficientNet(nn.Cell):
"""Generate EfficientNet architecture"""
def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=32, num_features=1280,
channel_multiplier=1.0, channel_divisor=8, channel_min=None,
pad_type='', act_fn=relu, drop_rate=0., drop_connect_rate=0.,
se_gate_fn=sigmoid, se_reduce_mid=False, bn_args=None,
global_pool='avg', head_conv='default', weight_init='goog'):
super(GenEfficientNet, self).__init__()
bn_args = _BN_ARGS_PT if bn_args is None else bn_args
self.num_classes = num_classes
self.drop_rate = drop_rate
self.num_features = num_features
self.conv_stem = _conv(in_chans, stem_size, 3,
stride=2, padding=1, pad_mode='pad')
self.bn1 = _fused_bn(stem_size, **bn_args)
self.act_fn = Swish()
in_chans = stem_size
self.blocks = BlockBuilder(in_chans, block_args, channel_multiplier,
channel_divisor, channel_min,
pad_type, act_fn, se_gate_fn, se_reduce_mid,
bn_args, drop_connect_rate, verbose=_DEBUG)
in_chs = self.blocks.in_chs
if not head_conv or head_conv == 'none':
self.efficient_head = False
self.conv_head = None
assert in_chs == self.num_features
else:
self.efficient_head = head_conv == 'efficient'
self.conv_head = _conv1x1(in_chs, self.num_features)
self.bn2 = None if self.efficient_head else _fused_bn(
self.num_features, **bn_args)
self.global_pool = P.ReduceMean(keep_dims=True)
self.classifier = _dense(self.num_features, self.num_classes)
self.reshape = P.Reshape()
self.shape = P.Shape()
self.drop_out = nn.Dropout(keep_prob=1-self.drop_rate)
def construct(self, x):
"""efficient net entry point"""
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act_fn(x)
x = self.blocks(x)
if self.efficient_head:
x = self.global_pool(x, (2, 3))
x = self.conv_head(x)
x = self.act_fn(x)
            x = self.reshape(x, (self.shape(x)[0], -1))
else:
if self.conv_head is not None:
x = self.conv_head(x)
x = self.bn2(x)
x = self.act_fn(x)
x = self.global_pool(x, (2, 3))
x = self.reshape(x, (self.shape(x)[0], -1))
if self.training and self.drop_rate > 0.:
x = self.drop_out(x)
return self.classifier(x)
def _gen_efficientnet(channel_multiplier=1.0, depth_multiplier=1.0, num_classes=1000, **kwargs):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
Args:
channel_multiplier (int): multiplier to number of channels per layer
depth_multiplier (int): multiplier to number of repeats per stage
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'],
['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'],
['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
num_features = max(1280, _round_channels(
1280, channel_multiplier, 8, None))
model = GenEfficientNet(
_decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
num_classes=num_classes,
stem_size=32,
channel_multiplier=channel_multiplier,
num_features=num_features,
bn_args=_resolve_bn_args(kwargs),
act_fn=Swish,
**kwargs)
return model
def tinynet(sub_model="c", num_classes=1000, in_chans=3, **kwargs):
""" TinyNet Models """
# choose a sub model
r, w, d = TINYNET_CFG[sub_model]
default_cfg = default_cfgs['efficientnet_b0']
assert default_cfg['input_size'] == (3, 224, 224), "All tinynet models are \
evolved from Efficient-B0, which has input dimension of 3*224*224"
channel, height, width = default_cfg['input_size']
height = int(r * height)
width = int(r * width)
default_cfg['input_size'] = (channel, height, width)
print("Data processing configuration for current model + dataset:")
print("input_size:", default_cfg['input_size'])
print("channel mutiplier:%s, depth multiplier:%s, resolution multiplier:%s" % (w, d, r))
model = _gen_efficientnet(
channel_multiplier=w, depth_multiplier=d,
num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
return model
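# Illustrative usage (a sketch; assumes a MindSpore context has already been configured):
#   net = tinynet(sub_model="a", num_classes=1000)  # TinyNet-A: r/w/d = (0.86, 1.0, 1.2)
#   print(net.default_cfg['input_size'])            # resolution-scaled input shape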
| 38.742331
| 125
| 0.619066
|
f2b26eff0b47da5638bbb1783944ed1f1207db2e
| 4,551
|
py
|
Python
|
hypergan/trainers/needs_pytorch/depth_trainer.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 889
|
2016-08-27T01:37:35.000Z
|
2018-10-07T19:47:56.000Z
|
hypergan/trainers/needs_pytorch/depth_trainer.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 101
|
2016-11-30T03:34:02.000Z
|
2018-10-02T13:50:52.000Z
|
hypergan/trainers/needs_pytorch/depth_trainer.py
|
limberc/HyperGAN
|
b074e74abf0ed9b81bd52084706e3707a47e0fe2
|
[
"MIT"
] | 145
|
2016-09-27T06:56:24.000Z
|
2018-09-25T16:09:28.000Z
|
import numpy as np
import hyperchamber as hc
import inspect
import tensorflow as tf  # needed: tf.Variable and tf.variables_initializer are used below
from hypergan.trainers.base_trainer import BaseTrainer
TINY = 1e-12
class DepthTrainer(BaseTrainer):
""" Runs an optimizer multiple times and combines the output into a mixture. """
def _create(self):
self.hist = [0 for i in range(2)]
config = self.config
self.mix_threshold_reached = False
variables = self.gan.d_vars() + self.gan.g_vars()
self.ema = [ tf.Variable(_v) for _v in variables ]
self.store_v = [ _v.assign(_v2) for _v,_v2 in zip(self.ema, variables) ]
self.combine = [ _v.assign((config.decay or 0.1) *_ema + (1.-(config.decay or 0.1))*_new) for _v, _ema, _new in zip(variables, self.ema, variables)]
self._delegate = self.gan.create_component(config.trainer, d_vars=self.d_vars, g_vars=self.g_vars)
self.reset_optimizer_t = tf.variables_initializer(self._delegate.variables())
self.depth_step = 0
self.fitness = -self.gan.loss.d_fake
self.latent = None
def required(self):
return "".split()
def _best_latent(self):
if self.latent is None:
self.latent = self.gan.session.run(self.gan.latent.sample)
fitness = self.gan.session.run(self.fitness, {self.gan.latent.sample:self.latent})
zs = self.latent
sort_zs = None
last_fitness = 10000
count = 0
while True:
d = self.gan.session.run([self.fitness,self.gan.latent.sample])
_f = d[0]
_z = d[1]
fitness = np.reshape(fitness, np.shape(_f))
fitness = np.concatenate([fitness,_f], axis=0)
zs = np.reshape(zs, np.shape(_z))
zs = np.concatenate([zs,_z], axis=0)
sort = np.argsort(fitness.flatten())[:self.gan.batch_size()]
zs = zs[sort]
fitness = fitness.flatten()[sort]
if fitness.flatten()[-1] < last_fitness:
last_fitness = fitness[-1]
count = 0
else:
count += 1
if count > self.config.heuristic:
#print("z fit ", i)
sort_zs = np.reshape(zs, np.shape(_z))
break
return sort_zs
def _step(self, feed_dict):
gan = self.gan
sess = gan.session
config = self.config
depth = self.config.depth
if depth:
if self.current_step % depth == 0:
if self.config.freeze_latent:
if self.config.freeze_latent == "best":
self.latent = self._best_latent()
else:
self.latent = self.gan.session.run(self.gan.latent.sample)
feed_dict[gan.latent.sample] = self.latent
self.before_step(self.current_step, feed_dict)
gan.session.run(self.store_v)
if self.config.reset_optimizer:
self.gan.session.run([self.reset_optimizer_t])
if self.config.freeze_latent:
feed_dict[gan.latent.sample] = self.latent
self._delegate.step(feed_dict)
if self.current_step % depth == depth - 1:
gan.session.run(self.combine)
self.after_step(self.current_step, feed_dict)
else:
if self.depth_step == 0:
if self.config.freeze_latent:
if self.config.freeze_latent == "best":
self.latent = self._best_latent()
else:
self.latent = self.gan.session.run(self.gan.latent.sample)
feed_dict[gan.latent.sample] = self.latent
self.before_step(self.current_step, feed_dict)
gan.session.run(self.store_v)
self.max_gradient_mean = 0.0
if self.config.freeze_latent:
feed_dict[gan.latent.sample] = self.latent
self._delegate.step(feed_dict)
gradient_mean = gan.session.run(gan.gradient_mean, feed_dict)
self.depth_step += 1
if gradient_mean > self.max_gradient_mean:
self.max_gradient_mean = gradient_mean
            if gradient_mean / self.max_gradient_mean < (self.config.gradient_threshold or 0.2):
gan.session.run(self.combine)
self.after_step(self.current_step, feed_dict)
self.depth_step = 0
def variables(self):
return self._delegate.variables()
| 42.53271
| 156
| 0.568227
|
40f87669808daec092acc21c5cf4cc814146a2ba
| 7,724
|
py
|
Python
|
wetterdienst/additionals/geo_location.py
|
e-dism/wetterdienst
|
158e92b093536dc65d0d4450da3965f079c7d45b
|
[
"MIT"
] | 1
|
2022-01-31T14:35:46.000Z
|
2022-01-31T14:35:46.000Z
|
wetterdienst/additionals/geo_location.py
|
e-dism/wetterdienst
|
158e92b093536dc65d0d4450da3965f079c7d45b
|
[
"MIT"
] | null | null | null |
wetterdienst/additionals/geo_location.py
|
e-dism/wetterdienst
|
158e92b093536dc65d0d4450da3965f079c7d45b
|
[
"MIT"
] | null | null | null |
""" calculates the nearest weather station to a requested location"""
from _datetime import datetime
from typing import Union, Tuple, Optional
import numpy as np
import pandas as pd
import logging
from scipy.spatial import cKDTree
from wetterdienst.additionals.functions import (
check_parameters,
parse_enumeration_from_template,
cast_to_list,
)
from wetterdienst.additionals.time_handling import parse_datetime
from wetterdienst.data_models.coordinates import Coordinates
from wetterdienst.enumerations.column_names_enumeration import DWDMetaColumns
from wetterdienst.enumerations.parameter_enumeration import Parameter
from wetterdienst.enumerations.period_type_enumeration import PeriodType
from wetterdienst.enumerations.time_resolution_enumeration import TimeResolution
from wetterdienst.exceptions import InvalidParameterCombination
from wetterdienst.parse_metadata import metadata_for_climate_observations
KM_EARTH_RADIUS = 6371
logger = logging.getLogger(__name__)
def get_nearby_stations(
latitude: float,
longitude: float,
minimal_available_date: Union[datetime, str],
maximal_available_date: Union[datetime, str],
parameter: Union[Parameter, str],
time_resolution: Union[TimeResolution, str],
period_type: Union[PeriodType, str],
num_stations_nearby: Optional[int] = None,
max_distance_in_km: Optional[float] = None,
) -> pd.DataFrame:
"""
Provides a list of weather station ids for the requested data
Args:
latitude: latitude of location to search for nearest
weather station
longitude: longitude of location to search for nearest
weather station
minimal_available_date: Start date of timespan where measurements
should be available
maximal_available_date: End date of timespan where measurements
should be available
parameter: observation measure
time_resolution: frequency/granularity of measurement interval
period_type: recent or historical files
num_stations_nearby: Number of stations that should be nearby
max_distance_in_km: alternative filtering criteria, maximum
distance to location in km
Returns:
DataFrames with valid Stations in radius per requested location
"""
    if num_stations_nearby and max_distance_in_km:
raise ValueError("Either set 'num_stations_nearby' or 'max_distance_in_km'.")
if num_stations_nearby == 0:
raise ValueError("'num_stations_nearby' has to be at least 1.")
parameter = parse_enumeration_from_template(parameter, Parameter)
time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)
period_type = parse_enumeration_from_template(period_type, PeriodType)
minimal_available_date = (
minimal_available_date
if isinstance(minimal_available_date, datetime)
else parse_datetime(minimal_available_date)
)
maximal_available_date = (
maximal_available_date
if isinstance(maximal_available_date, datetime)
else parse_datetime(maximal_available_date)
)
if not check_parameters(parameter, time_resolution, period_type):
raise InvalidParameterCombination(
f"The combination of {parameter.value}, {time_resolution.value}, "
f"{period_type.value} is invalid."
)
coords = Coordinates(np.array(latitude), np.array(longitude))
metadata = metadata_for_climate_observations(
parameter, time_resolution, period_type
)
metadata = metadata[
(metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date)
& (metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date)
].reset_index(drop=True)
# For distance filtering make normal query including all stations
if max_distance_in_km:
num_stations_nearby = metadata.shape[0]
distances, indices_nearest_neighbours = _derive_nearest_neighbours(
metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby
)
# Require list of indices for consistency
# Cast to np.array required for subset
indices_nearest_neighbours = np.array(cast_to_list(indices_nearest_neighbours))
distances_km = np.array(distances * KM_EARTH_RADIUS)
# Filter for distance based on calculated distances
if max_distance_in_km:
_in_max_distance_indices = np.where(distances_km <= max_distance_in_km)[0]
indices_nearest_neighbours = indices_nearest_neighbours[
_in_max_distance_indices
]
distances_km = distances_km[_in_max_distance_indices]
metadata_location = metadata.loc[
indices_nearest_neighbours
if isinstance(indices_nearest_neighbours, (list, np.ndarray))
else [indices_nearest_neighbours],
:,
]
metadata_location["DISTANCE_TO_LOCATION"] = distances_km
if metadata_location.empty:
logger.warning(
f"No weather station was found for coordinate "
f"{latitude}°N and {longitude}°E "
)
return metadata_location
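# Illustrative call (a sketch; the parameter/resolution/period strings below are
# placeholders and must match the library's enumerations):
#   stations = get_nearby_stations(
#       latitude=50.0, longitude=8.9,
#       minimal_available_date="2020-01-01", maximal_available_date="2020-01-31",
#       parameter="kl", time_resolution="daily", period_type="recent",
#       num_stations_nearby=5,
#   )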
def _derive_nearest_neighbours(
latitudes_stations: np.array,
longitudes_stations: np.array,
coordinates: Coordinates,
num_stations_nearby: int = 1,
) -> Tuple[Union[float, np.ndarray], np.ndarray]:
"""
A function that uses a k-d tree algorithm to obtain the nearest
neighbours to coordinate pairs
Args:
latitudes_stations (np.array): latitude values of stations being compared to
the coordinates
longitudes_stations (np.array): longitude values of stations being compared to
the coordinates
coordinates (Coordinates): the coordinates for which the nearest neighbour
is searched
num_stations_nearby: Number of stations that should be nearby
Returns:
Tuple of distances and ranks of nearest to most distant stations
"""
points = np.c_[np.radians(latitudes_stations), np.radians(longitudes_stations)]
distance_tree = cKDTree(points)
return distance_tree.query(
coordinates.get_coordinates_in_radians(), k=num_stations_nearby
)
def stations_to_geojson(df: pd.DataFrame) -> dict:
"""
Convert DWD station information into GeoJSON format.
Args:
df: Input DataFrame containing station information.
Return:
Dictionary in GeoJSON FeatureCollection format.
"""
df = df.rename(columns=str.lower)
features = []
for _, station in df.iterrows():
features.append(
{
"type": "Feature",
"properties": {
"id": station["station_id"],
"name": station["station_name"],
"state": station["state"],
"from_date": station["from_date"].isoformat(),
"to_date": station["to_date"].isoformat(),
"has_file": station["has_file"],
},
"geometry": {
# WGS84 is implied and coordinates represent decimal degrees ordered
# as "longitude, latitude [,elevation]" with z expressed as metres
# above mean sea level per WGS84.
# -- http://wiki.geojson.org/RFC-001
"type": "Point",
"coordinates": [
station["lon"],
station["lat"],
station["station_height"],
],
},
}
)
return {
"type": "FeatureCollection",
"features": features,
}
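# Illustrative output of stations_to_geojson (all values are hypothetical), showing the
# FeatureCollection shape produced above:
#
#   {
#       "type": "FeatureCollection",
#       "features": [
#           {
#               "type": "Feature",
#               "properties": {"id": "00011", "name": "Example station", "state": "Hessen",
#                              "from_date": "1951-01-01T00:00:00",
#                              "to_date": "2020-12-31T00:00:00", "has_file": False},
#               "geometry": {"type": "Point", "coordinates": [8.0, 50.0, 100.0]},
#           }
#       ],
#   }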
| 36.433962
| 88
| 0.685137
|
c3e13cd3b18aa52acc9751e639b9d9ebd93c897d
| 809
|
py
|
Python
|
src/manage.py
|
nihn/restaurant
|
3fe3c44719c299ff17b84eef2a648589035e7d0e
|
[
"MIT"
] | null | null | null |
src/manage.py
|
nihn/restaurant
|
3fe3c44719c299ff17b84eef2a648589035e7d0e
|
[
"MIT"
] | null | null | null |
src/manage.py
|
nihn/restaurant
|
3fe3c44719c299ff17b84eef2a648589035e7d0e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restaurant.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.173913
| 77
| 0.644005
|
238df20e4c2ea807d51a3c5a888c728247f3b92f
| 1,309
|
py
|
Python
|
scripts/cond_num_run.py
|
polyfem/Decoupling-Simulation-Accuracy-from-Mesh-Quality
|
955d4aabb6272c8be93728c2a91f70542506a503
|
[
"MIT"
] | 2
|
2019-09-20T12:29:08.000Z
|
2019-10-01T18:26:05.000Z
|
scripts/cond_num_run.py
|
polyfem/Decoupling-Simulation-Accuracy-from-Mesh-Quality
|
955d4aabb6272c8be93728c2a91f70542506a503
|
[
"MIT"
] | null | null | null |
scripts/cond_num_run.py
|
polyfem/Decoupling-Simulation-Accuracy-from-Mesh-Quality
|
955d4aabb6272c8be93728c2a91f70542506a503
|
[
"MIT"
] | null | null | null |
import os
import json
import subprocess
import tempfile
if __name__ == '__main__':
polyfem_exe = "./PolyFEM_bin"
out_folder = "cond_num"
n_refs = [0, 1, 2, 3]
p_refs = [False, True]
current_folder = cwd = os.getcwd()
with open("test.json", 'r') as f:
json_data = json.load(f)
for is_bad in [True, False]:
mesh = "../data/conditioning_44000_bad.mesh" if is_bad else "../data/conditioning_44000_good.mesh"
out_f = out_folder + ('bad' if is_bad else 'good')
for ref in n_refs:
for pref in p_refs:
json_data["mesh"] = mesh
json_data["n_refs"] = ref
json_data["use_p_ref"] = pref
json_data["output"] = os.path.join(os.getcwd(), out_f, "out_" + str(ref) + ("_pref" if pref else "") + ".json")
json_data["stiffness_mat_save_path"] = os.path.join(os.getcwd(), out_f, "mat_" + str(ref) + ("_pref" if pref else "") + ".json")
with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
with open(tmp_json.name, 'w') as f:
f.write(json.dumps(json_data, indent=4))
args = [polyfem_exe, '-json', tmp_json.name, '-cmd']
subprocess.run(args)
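# For illustration (paths are hypothetical): with is_bad=True, ref=2 and pref=True the loop
# above writes the solver output to <cwd>/cond_numbad/out_2_pref.json and the stiffness
# matrix to <cwd>/cond_numbad/mat_2_pref.json, driving PolyFEM_bin with a temporary copy of
# test.json passed via '-json'.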
| 33.564103
| 144
| 0.54851
|
e1d4f9fe285e7402090e9fdb7dc043268ff1499e
| 29,811
|
py
|
Python
|
mypy/checkpattern.py
|
DiddiLeija/mypy
|
40bbfb5f2539f6fc3ea8c9b4de6b62d167bb003f
|
[
"PSF-2.0"
] | null | null | null |
mypy/checkpattern.py
|
DiddiLeija/mypy
|
40bbfb5f2539f6fc3ea8c9b4de6b62d167bb003f
|
[
"PSF-2.0"
] | null | null | null |
mypy/checkpattern.py
|
DiddiLeija/mypy
|
40bbfb5f2539f6fc3ea8c9b4de6b62d167bb003f
|
[
"PSF-2.0"
] | null | null | null |
"""Pattern checker. This file is conceptually part of TypeChecker."""
from collections import defaultdict
from typing import List, Optional, Tuple, Dict, NamedTuple, Set, Union
from typing_extensions import Final
import mypy.checker
from mypy.checkmember import analyze_member_access
from mypy.expandtype import expand_type_by_instance
from mypy.join import join_types
from mypy.literals import literal_hash
from mypy.maptype import map_instance_to_supertype
from mypy.meet import narrow_declared_type
from mypy import message_registry
from mypy.messages import MessageBuilder
from mypy.nodes import Expression, ARG_POS, TypeAlias, TypeInfo, Var, NameExpr
from mypy.patterns import (
Pattern, AsPattern, OrPattern, ValuePattern, SequencePattern, StarredPattern, MappingPattern,
ClassPattern, SingletonPattern
)
from mypy.plugin import Plugin
from mypy.subtypes import is_subtype
from mypy.typeops import try_getting_str_literals_from_type, make_simplified_union, \
coerce_to_literal
from mypy.types import (
ProperType, AnyType, TypeOfAny, Instance, Type, UninhabitedType, get_proper_type,
TypedDictType, TupleType, NoneType, UnionType
)
from mypy.typevars import fill_typevars
from mypy.visitor import PatternVisitor
self_match_type_names: Final = [
"builtins.bool",
"builtins.bytearray",
"builtins.bytes",
"builtins.dict",
"builtins.float",
"builtins.frozenset",
"builtins.int",
"builtins.list",
"builtins.set",
"builtins.str",
"builtins.tuple",
]
non_sequence_match_type_names: Final = [
"builtins.str",
"builtins.bytes",
"builtins.bytearray"
]
# For every Pattern a PatternType can be calculated. This requires recursively calculating
# the PatternTypes of the sub-patterns first.
# Using the data in the PatternType the match subject and captured names can be narrowed/inferred.
PatternType = NamedTuple(
'PatternType',
[
('type', Type), # The type the match subject can be narrowed to
('rest_type', Type), # The remaining type if the pattern didn't match
('captures', Dict[Expression, Type]), # The variables captured by the pattern
])
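# Illustrative example (informal notation, not taken from the original source): checking the
# class pattern ``int()`` against a subject of type ``Union[int, str]`` could produce
# PatternType(type=int, rest_type=str, captures={}), i.e. the subject narrows to int on a
# match, str remains for later cases, and no names are captured.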
class PatternChecker(PatternVisitor[PatternType]):
"""Pattern checker.
This class checks if a pattern can match a type, what the type can be narrowed to, and what
type capture patterns should be inferred as.
"""
# Some services are provided by a TypeChecker instance.
chk: 'mypy.checker.TypeChecker'
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
# Currently unused
plugin: Plugin
# The expression being matched against the pattern
subject: Expression
subject_type: Type
# Type of the subject to check the (sub)pattern against
type_context: List[Type]
# Types that match against self instead of their __match_args__ if used as a class pattern
# Filled in from self_match_type_names
self_match_types: List[Type]
# Types that are sequences, but don't match sequence patterns. Filled in from
# non_sequence_match_type_names
non_sequence_match_types: List[Type]
def __init__(self,
chk: 'mypy.checker.TypeChecker',
msg: MessageBuilder, plugin: Plugin
) -> None:
self.chk = chk
self.msg = msg
self.plugin = plugin
self.type_context = []
self.self_match_types = self.generate_types_from_names(self_match_type_names)
self.non_sequence_match_types = self.generate_types_from_names(
non_sequence_match_type_names
)
def accept(self, o: Pattern, type_context: Type) -> PatternType:
self.type_context.append(type_context)
result = o.accept(self)
self.type_context.pop()
return result
def visit_as_pattern(self, o: AsPattern) -> PatternType:
current_type = self.type_context[-1]
if o.pattern is not None:
pattern_type = self.accept(o.pattern, current_type)
typ, rest_type, type_map = pattern_type
else:
typ, rest_type, type_map = current_type, UninhabitedType(), {}
if not is_uninhabited(typ) and o.name is not None:
typ, _ = self.chk.conditional_types_with_intersection(current_type,
[get_type_range(typ)],
o,
default=current_type)
if not is_uninhabited(typ):
type_map[o.name] = typ
return PatternType(typ, rest_type, type_map)
def visit_or_pattern(self, o: OrPattern) -> PatternType:
current_type = self.type_context[-1]
#
# Check all the subpatterns
#
pattern_types = []
for pattern in o.patterns:
pattern_type = self.accept(pattern, current_type)
pattern_types.append(pattern_type)
current_type = pattern_type.rest_type
#
# Collect the final type
#
types = []
for pattern_type in pattern_types:
if not is_uninhabited(pattern_type.type):
types.append(pattern_type.type)
#
# Check the capture types
#
capture_types: Dict[Var, List[Tuple[Expression, Type]]] = defaultdict(list)
# Collect captures from the first subpattern
for expr, typ in pattern_types[0].captures.items():
node = get_var(expr)
capture_types[node].append((expr, typ))
# Check if other subpatterns capture the same names
for i, pattern_type in enumerate(pattern_types[1:]):
vars = {get_var(expr) for expr, _ in pattern_type.captures.items()}
if capture_types.keys() != vars:
self.msg.fail(message_registry.OR_PATTERN_ALTERNATIVE_NAMES, o.patterns[i])
for expr, typ in pattern_type.captures.items():
node = get_var(expr)
capture_types[node].append((expr, typ))
captures: Dict[Expression, Type] = {}
for var, capture_list in capture_types.items():
typ = UninhabitedType()
for _, other in capture_list:
typ = join_types(typ, other)
captures[capture_list[0][0]] = typ
union_type = make_simplified_union(types)
return PatternType(union_type, current_type, captures)
def visit_value_pattern(self, o: ValuePattern) -> PatternType:
current_type = self.type_context[-1]
typ = self.chk.expr_checker.accept(o.expr)
typ = coerce_to_literal(typ)
narrowed_type, rest_type = self.chk.conditional_types_with_intersection(
current_type,
[get_type_range(typ)],
o,
default=current_type
)
return PatternType(narrowed_type, rest_type, {})
def visit_singleton_pattern(self, o: SingletonPattern) -> PatternType:
current_type = self.type_context[-1]
value: Union[bool, None] = o.value
if isinstance(value, bool):
typ = self.chk.expr_checker.infer_literal_expr_type(value, "builtins.bool")
elif value is None:
typ = NoneType()
else:
assert False
narrowed_type, rest_type = self.chk.conditional_types_with_intersection(
current_type,
[get_type_range(typ)],
o,
default=current_type
)
return PatternType(narrowed_type, rest_type, {})
def visit_sequence_pattern(self, o: SequencePattern) -> PatternType:
#
# check for existence of a starred pattern
#
current_type = get_proper_type(self.type_context[-1])
if not self.can_match_sequence(current_type):
return self.early_non_match()
star_positions = [i for i, p in enumerate(o.patterns) if isinstance(p, StarredPattern)]
star_position: Optional[int] = None
if len(star_positions) == 1:
star_position = star_positions[0]
elif len(star_positions) >= 2:
assert False, "Parser should prevent multiple starred patterns"
required_patterns = len(o.patterns)
if star_position is not None:
required_patterns -= 1
#
# get inner types of original type
#
if isinstance(current_type, TupleType):
inner_types = current_type.items
size_diff = len(inner_types) - required_patterns
if size_diff < 0:
return self.early_non_match()
elif size_diff > 0 and star_position is None:
return self.early_non_match()
else:
inner_type = self.get_sequence_type(current_type)
if inner_type is None:
inner_type = self.chk.named_type("builtins.object")
inner_types = [inner_type] * len(o.patterns)
#
# match inner patterns
#
contracted_new_inner_types: List[Type] = []
contracted_rest_inner_types: List[Type] = []
captures: Dict[Expression, Type] = {}
contracted_inner_types = self.contract_starred_pattern_types(inner_types,
star_position,
required_patterns)
can_match = True
for p, t in zip(o.patterns, contracted_inner_types):
pattern_type = self.accept(p, t)
typ, rest, type_map = pattern_type
if is_uninhabited(typ):
can_match = False
else:
contracted_new_inner_types.append(typ)
contracted_rest_inner_types.append(rest)
self.update_type_map(captures, type_map)
new_inner_types = self.expand_starred_pattern_types(contracted_new_inner_types,
star_position,
len(inner_types))
rest_inner_types = self.expand_starred_pattern_types(contracted_rest_inner_types,
star_position,
len(inner_types))
#
# Calculate new type
#
new_type: Type
rest_type: Type = current_type
if not can_match:
new_type = UninhabitedType()
elif isinstance(current_type, TupleType):
narrowed_inner_types = []
inner_rest_types = []
for inner_type, new_inner_type in zip(inner_types, new_inner_types):
narrowed_inner_type, inner_rest_type = \
self.chk.conditional_types_with_intersection(
new_inner_type,
[get_type_range(inner_type)],
o,
default=new_inner_type
)
narrowed_inner_types.append(narrowed_inner_type)
inner_rest_types.append(inner_rest_type)
if all(not is_uninhabited(typ) for typ in narrowed_inner_types):
new_type = TupleType(narrowed_inner_types, current_type.partial_fallback)
else:
new_type = UninhabitedType()
if all(is_uninhabited(typ) for typ in inner_rest_types):
# All subpatterns always match, so we can apply negative narrowing
rest_type = TupleType(rest_inner_types, current_type.partial_fallback)
else:
new_inner_type = UninhabitedType()
for typ in new_inner_types:
new_inner_type = join_types(new_inner_type, typ)
new_type = self.construct_sequence_child(current_type, new_inner_type)
if is_subtype(new_type, current_type):
new_type, _ = self.chk.conditional_types_with_intersection(
current_type,
[get_type_range(new_type)],
o,
default=current_type
)
else:
new_type = current_type
return PatternType(new_type, rest_type, captures)
def get_sequence_type(self, t: Type) -> Optional[Type]:
t = get_proper_type(t)
if isinstance(t, AnyType):
return AnyType(TypeOfAny.from_another_any, t)
if isinstance(t, UnionType):
items = [self.get_sequence_type(item) for item in t.items]
not_none_items = [item for item in items if item is not None]
if len(not_none_items) > 0:
return make_simplified_union(not_none_items)
else:
return None
if self.chk.type_is_iterable(t) and isinstance(t, Instance):
return self.chk.iterable_item_type(t)
else:
return None
def contract_starred_pattern_types(self,
types: List[Type],
star_pos: Optional[int],
num_patterns: int
) -> List[Type]:
"""
Contracts a list of types in a sequence pattern depending on the position of a starred
capture pattern.
For example if the sequence pattern [a, *b, c] is matched against types [bool, int, str,
bytes] the contracted types are [bool, Union[int, str], bytes].
        If star_pos is None, the types are returned unchanged.
"""
if star_pos is None:
return types
new_types = types[:star_pos]
star_length = len(types) - num_patterns
new_types.append(make_simplified_union(types[star_pos:star_pos+star_length]))
new_types += types[star_pos+star_length:]
return new_types
def expand_starred_pattern_types(self,
types: List[Type],
star_pos: Optional[int],
num_types: int
) -> List[Type]:
"""Undoes the contraction done by contract_starred_pattern_types.
For example if the sequence pattern is [a, *b, c] and types [bool, int, str] are extended
to length 4 the result is [bool, int, int, str].
"""
if star_pos is None:
return types
new_types = types[:star_pos]
star_length = num_types - len(types) + 1
new_types += [types[star_pos]] * star_length
new_types += types[star_pos+1:]
return new_types
def visit_starred_pattern(self, o: StarredPattern) -> PatternType:
captures: Dict[Expression, Type] = {}
if o.capture is not None:
list_type = self.chk.named_generic_type('builtins.list', [self.type_context[-1]])
captures[o.capture] = list_type
return PatternType(self.type_context[-1], UninhabitedType(), captures)
def visit_mapping_pattern(self, o: MappingPattern) -> PatternType:
current_type = get_proper_type(self.type_context[-1])
can_match = True
captures: Dict[Expression, Type] = {}
for key, value in zip(o.keys, o.values):
inner_type = self.get_mapping_item_type(o, current_type, key)
if inner_type is None:
can_match = False
inner_type = self.chk.named_type("builtins.object")
pattern_type = self.accept(value, inner_type)
if is_uninhabited(pattern_type.type):
can_match = False
else:
self.update_type_map(captures, pattern_type.captures)
if o.rest is not None:
mapping = self.chk.named_type("typing.Mapping")
if is_subtype(current_type, mapping) and isinstance(current_type, Instance):
mapping_inst = map_instance_to_supertype(current_type, mapping.type)
dict_typeinfo = self.chk.lookup_typeinfo("builtins.dict")
rest_type = Instance(dict_typeinfo, mapping_inst.args)
else:
object_type = self.chk.named_type("builtins.object")
rest_type = self.chk.named_generic_type("builtins.dict",
[object_type, object_type])
captures[o.rest] = rest_type
if can_match:
# We can't narrow the type here, as Mapping key is invariant.
new_type = self.type_context[-1]
else:
new_type = UninhabitedType()
return PatternType(new_type, current_type, captures)
def get_mapping_item_type(self,
pattern: MappingPattern,
mapping_type: Type,
key: Expression
) -> Optional[Type]:
local_errors = self.msg.clean_copy()
local_errors.disable_count = 0
mapping_type = get_proper_type(mapping_type)
if isinstance(mapping_type, TypedDictType):
result: Optional[Type] = self.chk.expr_checker.visit_typeddict_index_expr(
mapping_type, key, local_errors=local_errors)
# If we can't determine the type statically fall back to treating it as a normal
# mapping
if local_errors.is_errors():
local_errors = self.msg.clean_copy()
local_errors.disable_count = 0
result = self.get_simple_mapping_item_type(pattern,
mapping_type,
key,
local_errors)
if local_errors.is_errors():
result = None
else:
result = self.get_simple_mapping_item_type(pattern,
mapping_type,
key,
local_errors)
return result
def get_simple_mapping_item_type(self,
pattern: MappingPattern,
mapping_type: Type,
key: Expression,
local_errors: MessageBuilder
) -> Type:
result, _ = self.chk.expr_checker.check_method_call_by_name('__getitem__',
mapping_type,
[key],
[ARG_POS],
pattern,
local_errors=local_errors)
return result
def visit_class_pattern(self, o: ClassPattern) -> PatternType:
current_type = get_proper_type(self.type_context[-1])
#
# Check class type
#
type_info = o.class_ref.node
if type_info is None:
return PatternType(AnyType(TypeOfAny.from_error), AnyType(TypeOfAny.from_error), {})
if isinstance(type_info, TypeAlias) and not type_info.no_args:
self.msg.fail(message_registry.CLASS_PATTERN_GENERIC_TYPE_ALIAS, o)
return self.early_non_match()
if isinstance(type_info, TypeInfo):
any_type = AnyType(TypeOfAny.implementation_artifact)
typ: Type = Instance(type_info, [any_type] * len(type_info.defn.type_vars))
elif isinstance(type_info, TypeAlias):
typ = type_info.target
else:
if isinstance(type_info, Var):
name = str(type_info.type)
else:
name = type_info.name
self.msg.fail(message_registry.CLASS_PATTERN_TYPE_REQUIRED.format(name), o.class_ref)
return self.early_non_match()
new_type, rest_type = self.chk.conditional_types_with_intersection(
current_type, [get_type_range(typ)], o, default=current_type
)
if is_uninhabited(new_type):
return self.early_non_match()
# TODO: Do I need this?
narrowed_type = narrow_declared_type(current_type, new_type)
#
# Convert positional to keyword patterns
#
keyword_pairs: List[Tuple[Optional[str], Pattern]] = []
match_arg_set: Set[str] = set()
captures: Dict[Expression, Type] = {}
if len(o.positionals) != 0:
if self.should_self_match(typ):
if len(o.positionals) > 1:
self.msg.fail(message_registry.CLASS_PATTERN_TOO_MANY_POSITIONAL_ARGS, o)
pattern_type = self.accept(o.positionals[0], narrowed_type)
if not is_uninhabited(pattern_type.type):
return PatternType(pattern_type.type,
join_types(rest_type, pattern_type.rest_type),
pattern_type.captures)
captures = pattern_type.captures
else:
local_errors = self.msg.clean_copy()
match_args_type = analyze_member_access("__match_args__", typ, o,
False, False, False,
local_errors,
original_type=typ,
chk=self.chk)
if local_errors.is_errors():
self.msg.fail(message_registry.MISSING_MATCH_ARGS.format(typ), o)
return self.early_non_match()
proper_match_args_type = get_proper_type(match_args_type)
if isinstance(proper_match_args_type, TupleType):
match_arg_names = get_match_arg_names(proper_match_args_type)
if len(o.positionals) > len(match_arg_names):
self.msg.fail(message_registry.CLASS_PATTERN_TOO_MANY_POSITIONAL_ARGS, o)
return self.early_non_match()
else:
match_arg_names = [None] * len(o.positionals)
for arg_name, pos in zip(match_arg_names, o.positionals):
keyword_pairs.append((arg_name, pos))
if arg_name is not None:
match_arg_set.add(arg_name)
#
# Check for duplicate patterns
#
keyword_arg_set = set()
has_duplicates = False
for key, value in zip(o.keyword_keys, o.keyword_values):
keyword_pairs.append((key, value))
if key in match_arg_set:
self.msg.fail(
message_registry.CLASS_PATTERN_KEYWORD_MATCHES_POSITIONAL.format(key),
value
)
has_duplicates = True
elif key in keyword_arg_set:
self.msg.fail(message_registry.CLASS_PATTERN_DUPLICATE_KEYWORD_PATTERN.format(key),
value)
has_duplicates = True
keyword_arg_set.add(key)
if has_duplicates:
return self.early_non_match()
#
# Check keyword patterns
#
can_match = True
for keyword, pattern in keyword_pairs:
key_type: Optional[Type] = None
local_errors = self.msg.clean_copy()
if keyword is not None:
key_type = analyze_member_access(keyword,
narrowed_type,
pattern,
False,
False,
False,
local_errors,
original_type=new_type,
chk=self.chk)
else:
key_type = AnyType(TypeOfAny.from_error)
if local_errors.is_errors() or key_type is None:
key_type = AnyType(TypeOfAny.from_error)
self.msg.fail(message_registry.CLASS_PATTERN_UNKNOWN_KEYWORD.format(typ, keyword),
pattern)
inner_type, inner_rest_type, inner_captures = self.accept(pattern, key_type)
if is_uninhabited(inner_type):
can_match = False
else:
self.update_type_map(captures, inner_captures)
if not is_uninhabited(inner_rest_type):
rest_type = current_type
if not can_match:
new_type = UninhabitedType()
return PatternType(new_type, rest_type, captures)
def should_self_match(self, typ: Type) -> bool:
typ = get_proper_type(typ)
if isinstance(typ, Instance) and typ.type.is_named_tuple:
return False
for other in self.self_match_types:
if is_subtype(typ, other):
return True
return False
def can_match_sequence(self, typ: ProperType) -> bool:
if isinstance(typ, UnionType):
return any(self.can_match_sequence(get_proper_type(item)) for item in typ.items)
for other in self.non_sequence_match_types:
# We have to ignore promotions, as memoryview should match, but bytes,
# which it can be promoted to, shouldn't
if is_subtype(typ, other, ignore_promotions=True):
return False
sequence = self.chk.named_type("typing.Sequence")
# If the static type is more general than sequence the actual type could still match
return is_subtype(typ, sequence) or is_subtype(sequence, typ)
def generate_types_from_names(self, type_names: List[str]) -> List[Type]:
types: List[Type] = []
for name in type_names:
try:
types.append(self.chk.named_type(name))
except KeyError as e:
                # Some built-in types are not defined in all test cases
if not name.startswith('builtins.'):
raise e
pass
return types
def update_type_map(self,
original_type_map: Dict[Expression, Type],
extra_type_map: Dict[Expression, Type]
) -> None:
# Calculating this would not be needed if TypeMap directly used literal hashes instead of
        # expressions, as suggested in the TODO above its definition
already_captured = set(literal_hash(expr) for expr in original_type_map)
for expr, typ in extra_type_map.items():
if literal_hash(expr) in already_captured:
node = get_var(expr)
self.msg.fail(message_registry.MULTIPLE_ASSIGNMENTS_IN_PATTERN.format(node.name),
expr)
else:
original_type_map[expr] = typ
def construct_sequence_child(self, outer_type: Type, inner_type: Type) -> Type:
"""
        If outer_type is a child class of typing.Sequence, returns a new instance of
        outer_type that is a Sequence of inner_type. If outer_type is not a child class of
        typing.Sequence, just returns a Sequence of inner_type.
For example:
construct_sequence_child(List[int], str) = List[str]
"""
proper_type = get_proper_type(outer_type)
if isinstance(proper_type, UnionType):
types = [
self.construct_sequence_child(item, inner_type) for item in proper_type.items
if self.can_match_sequence(get_proper_type(item))
]
return make_simplified_union(types)
sequence = self.chk.named_generic_type("typing.Sequence", [inner_type])
if is_subtype(outer_type, self.chk.named_type("typing.Sequence")):
proper_type = get_proper_type(outer_type)
assert isinstance(proper_type, Instance)
empty_type = fill_typevars(proper_type.type)
partial_type = expand_type_by_instance(empty_type, sequence)
return expand_type_by_instance(partial_type, proper_type)
else:
return sequence
def early_non_match(self) -> PatternType:
return PatternType(UninhabitedType(), self.type_context[-1], {})
def get_match_arg_names(typ: TupleType) -> List[Optional[str]]:
args: List[Optional[str]] = []
for item in typ.items:
values = try_getting_str_literals_from_type(item)
if values is None or len(values) != 1:
args.append(None)
else:
args.append(values[0])
return args
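# Hypothetical example for the helper above: for a __match_args__ type such as
# Tuple[Literal["x"], Literal["y"]] it returns ["x", "y"]; any item that is not a single
# string literal contributes None instead.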
def get_var(expr: Expression) -> Var:
"""
    Warning: this is only true for expressions captured by a match statement.
    Don't call it from anywhere else.
"""
assert isinstance(expr, NameExpr)
node = expr.node
assert isinstance(node, Var)
return node
def get_type_range(typ: Type) -> 'mypy.checker.TypeRange':
typ = get_proper_type(typ)
if (isinstance(typ, Instance)
and typ.last_known_value
and isinstance(typ.last_known_value.value, bool)):
typ = typ.last_known_value
return mypy.checker.TypeRange(typ, is_upper_bound=False)
def is_uninhabited(typ: Type) -> bool:
return isinstance(get_proper_type(typ), UninhabitedType)
| 42.225212
| 99
| 0.577874
|
b8a0a5e628e447031646e37e0535690b73487545
| 36,993
|
py
|
Python
|
scipy/optimize/tests/test_optimize.py
|
mandli/scipy
|
ce90df2874c39595ef69a586a3e7fdd9cb9b6f48
|
[
"BSD-3-Clause"
] | 1
|
2016-02-20T13:49:40.000Z
|
2016-02-20T13:49:40.000Z
|
scipy/optimize/tests/test_optimize.py
|
mandli/scipy
|
ce90df2874c39595ef69a586a3e7fdd9cb9b6f48
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/optimize/tests/test_optimize.py
|
mandli/scipy
|
ce90df2874c39595ef69a586a3e7fdd9cb9b6f48
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Unit tests for optimization routines from optimize.py and tnc.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from numpy.testing import assert_raises, assert_allclose, \
assert_equal, assert_, TestCase, run_module_suite
from scipy import optimize
import numpy as np
from math import pow
class TestOptimize(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setUp(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
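    # Restating the objective above in closed form (notation added for clarity):
    # with log_pdot_j = F_j . x and Z(x) = sum_j exp(log_pdot_j), the function is
    #   f(x) = log Z(x) - K . x,
    # whose gradient (see grad below) is F^T p(x) - K with p_j(x) = exp(log_pdot_j) / Z(x).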
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
def test_cg(self, use_wrapper=False):
""" conjugate gradient optimization routine """
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs(self, use_wrapper=False):
""" Broyden-Fletcher-Goldfarb-Shanno optimization routine """
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['jac'], res['hess'], \
res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_nan(self):
"""Test corner case where nan is fed to optimizer. See #1542."""
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
olderr = np.seterr(over='ignore')
try:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
finally:
np.seterr(**olderr)
def test_bfgs_numerical_jacobian(self):
""" BFGS with numerical jacobian and a vector epsilon parameter """
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_infinite(self, use_wrapper=False):
"""Test corner case where -Inf is the minimum. See #1494."""
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if use_wrapper:
opts = {'disp': False}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self, use_wrapper=False):
""" Powell (direction set) optimization routine
"""
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = \
res['x'], res['fun'], res['direc'], res['nit'], \
res['nfev'], res['status']
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g. MKL, data alignment
# etc. affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[34:39],
[[ 0.72949016, -0.44156936, 0.47100962],
[ 0.72949016, -0.44156936, 0.48052496],
[ 1.45898031, -0.88313872, 0.95153458],
[ 0.72949016, -0.44156936, 0.47576729],
[ 1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self, use_wrapper=False):
""" Nelder-Mead simplex algorithm
"""
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = \
res['x'], res['fun'], res['nit'], res['nfev'], \
res['status']
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968 , -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_ncg(self, use_wrapper=False):
""" line-search Newton conjugate gradient optimization routine
"""
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian """
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian times a vector p """
if use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp = self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_l_bfgs_b(self):
""" limited-memory bound-constrained BFGS algorithm
"""
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[0. , -0.52489628, 0.48753042],
[0. , -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
""" L-BFGS-B with numerical jacobian """
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
""" L-BFGS-B with combined objective function and jacobian """
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_minimize_l_bfgs_b(self):
""" Minimize with L-BFGS-B method """
opts = {'disp': False, 'maxiter': self.maxiter}
x = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)['x']
assert_allclose(self.func(x), self.func(self.solution),
atol=1e-6)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert_(v < v0)
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize(self):
"""Tests for the minimize wrapper."""
self.setUp()
self.test_bfgs(True)
self.setUp()
self.test_bfgs_infinite(True)
self.setUp()
self.test_cg(True)
self.setUp()
self.test_ncg(True)
self.setUp()
self.test_ncg_hess(True)
self.setUp()
self.test_ncg_hessp(True)
self.setUp()
self.test_neldermead(True)
self.setUp()
self.test_powell(True)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp']:
if method in ('nelder-mead', 'powell', 'anneal', 'cobyla'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
method=method)
assert_(func(sol1.x) < func(sol2.x),
"%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
class TestLBFGSBBounds(TestCase):
""" Tests for L-BFGS-B with bounds """
def setUp(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
""" L-BFGS-B with bounds """
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
""" L-BFGS-B with fun and jac combined and extra arguments """
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
""" Minimize with method='L-BFGS-B' with bounds """
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(TestCase):
"""Tests for scalar optimizers"""
def setUp(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
""" brent algorithm """
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
""" golden algorithm """
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack = (-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack = (-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_fminbound(self):
"""Test fminbound """
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
assert_raises(ValueError, optimize.fminbound, self.fun,
np.zeros(2), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x= optimize.minimize_scalar(self.fun, bracket = (-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x= optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x= optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket = (-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x= optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x= optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
class TestTnc(TestCase):
"""TNC non-linear optimization.
These tests are taken from Prof. K. Schittkowski's test examples
for constrained non-linear programming.
http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm
"""
def setUp(self):
# options for minimize
self.opts = {'disp': False, 'maxiter': 200}
# objective functions and jacobian for each test
def f1(self, x, a=100.0):
return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)
def g1(self, x, a=100.0):
dif = [0, 0]
dif[1] = 2 * a * (x[1] - pow(x[0], 2))
dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
return dif
def fg1(self, x, a=100.0):
return self.f1(x, a), self.g1(x, a)
def f3(self, x):
return x[1] + pow(x[1] - x[0], 2) * 1.0e-5
def g3(self, x):
dif = [0,0]
dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
dif[1] = 1.0 - dif[0]
return dif
def fg3(self, x):
return self.f3(x), self.g3(x)
def f4(self, x):
return pow(x[0] + 1.0, 3) / 3.0 + x[1]
def g4(self, x):
dif = [0,0]
dif[0] = pow(x[0] + 1.0, 2)
dif[1] = 1.0
return dif
def fg4(self, x):
return self.f4(x), self.g4(x)
def f5(self, x):
return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
1.5 * x[0] + 2.5 * x[1] + 1.0
def g5(self, x):
dif = [0,0]
v1 = np.cos(x[0] + x[1])
v2 = 2.0*(x[0] - x[1])
dif[0] = v1 + v2 - 1.5
dif[1] = v1 - v2 + 2.5
return dif
def fg5(self, x):
return self.f5(x), self.g5(x)
def f38(self, x):
return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
pow(x[3] - 1.0, 2)) +
19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5
def g38(self, x):
dif = [0, 0, 0, 0]
dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
2.0 * (1.0 - x[0])) * 1.0e-5
dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
19.8 * (x[3] - 1.0)) * 1.0e-5
dif[2] = ( - 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
2.0 * (1.0 - x[2])) * 1.0e-5
dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
19.8 * (x[1] - 1.0)) * 1.0e-5
return dif
def fg38(self, x):
return self.f38(x), self.g38(x)
def f45(self, x):
return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0
def g45(self, x):
dif = [0] * 5
dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
return dif
def fg45(self, x):
return self.f45(x), self.g45(x)
# tests
# minimize with method=TNC
def test_minimize_tnc1(self):
"""Minimize, method=TNC, 1"""
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc1b(self):
"""Minimize, method=TNC, 1b (approx gradient)"""
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.f1, x0, method='TNC',
bounds=bnds, options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)
def test_minimize_tnc1c(self):
"""Minimize, method=TNC, 1c (combined function and gradient)"""
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.fg1, x0, method='TNC',
jac=True, bounds=bnds,
options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc2(self):
"""Minimize, method=TNC, 2"""
x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc3(self):
"""Minimize, method=TNC, 3"""
x0, bnds = [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x = optimize.minimize(self.f3, x0, method='TNC',
jac=self.g3, bounds=bnds,
options=self.opts).x
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)
def test_minimize_tnc4(self):
"""Minimize, method=TNC, 4"""
        x0, bnds = [1.125, 0.125], [(1, None), (0, None)]
xopt = [1, 0]
x = optimize.minimize(self.f4, x0, method='TNC',
jac=self.g4, bounds=bnds,
options=self.opts).x
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)
def test_minimize_tnc5(self):
"""Minimize, method=TNC, 5"""
x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x = optimize.minimize(self.f5, x0, method='TNC',
jac=self.g5, bounds=bnds,
options=self.opts).x
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)
def test_minimize_tnc38(self):
"""Minimize, method=TNC, 38"""
x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x = optimize.minimize(self.f38, x0, method='TNC',
jac=self.g38, bounds=bnds,
options=self.opts).x
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)
def test_minimize_tnc45(self):
"""Minimize, method=TNC, 45"""
x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x = optimize.minimize(self.f45, x0, method='TNC',
jac=self.g45, bounds=bnds,
options=self.opts).x
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)
# fmin_tnc
def test_tnc1(self):
" TNC: test 1"
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1b(self):
" TNC: test 1 (approx. gradient)"
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1c(self):
" TNC: test 1 (separate fprime)"
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc2(self):
" TNC: test 2"
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc3(self):
" TNC: test 3"
fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc4(self):
" TNC: test 4"
fg, x, bounds = self.fg4, [1.125,0.125], [(1, None), (0, None)]
xopt = [1, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc5(self):
" TNC: test 5"
fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc38(self):
" TNC: test 38"
fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc45(self):
" TNC: test 45"
fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
(0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
class TestRosen(TestCase):
def test_hess(self):
"""Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)"""
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
if __name__ == "__main__":
run_module_suite()
| 39.564706
| 84
| 0.497608
|
400d66a9a4b434f74b1ee895fe636957e1778c97
| 1,357
|
py
|
Python
|
models/StereoCNN/resnet.py
|
daili0015/ModelFeast
|
0689ced4d0f37be438d3a91908e5e4cc5b7d54b8
|
[
"MIT"
] | 247
|
2019-03-05T07:12:29.000Z
|
2022-03-29T01:51:17.000Z
|
models/StereoCNN/resnet.py
|
jungerschwarz/ModelFeast
|
03afca0b129532135910ee2ac72a3b85be795289
|
[
"MIT"
] | 8
|
2019-05-21T03:05:27.000Z
|
2021-12-09T03:22:51.000Z
|
models/StereoCNN/resnet.py
|
jungerschwarz/ModelFeast
|
03afca0b129532135910ee2ac72a3b85be795289
|
[
"MIT"
] | 47
|
2019-03-05T07:14:13.000Z
|
2021-11-11T01:04:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: zcy
# @Date: 2019-02-15 15:00:10
# @Last Modified by: zcy
# @Last Modified time: 2019-02-15 15:07:57
from models.StereoCNN.Resnet_module import *
__all__ = [
    'resnet10_3d', 'resnet18_3d', 'resnet34_3d', 'resnet50_3d',
    'resnet101_3d', 'resnet152_3d', 'resnet200_3d'
]
def resnet10_3d(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model
def resnet18_3d(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34_3d(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50_3d(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101_3d(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152_3d(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def resnet200_3d(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
return model
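# Usage sketch: the keyword arguments accepted depend on models.StereoCNN.Resnet_module.ResNet,
# which is not shown in this file; the argument name below is illustrative only.
#
#   model = resnet18_3d(num_classes=10)  # hypothetical kwarg forwarded to ResNet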
| 21.203125
| 56
| 0.607959
|
e1c80101baf71d2902e63ac8019e46c65e22f2c4
| 5,604
|
py
|
Python
|
pandasgui/widgets/json_viewer.py
|
felipeescallon/pandas-GUI
|
40327cd2763d830e761475df00d62b8cb29c3438
|
[
"MIT"
] | 1
|
2021-04-14T03:06:14.000Z
|
2021-04-14T03:06:14.000Z
|
pandasgui/widgets/json_viewer.py
|
felipeescallon/pandas-GUI
|
40327cd2763d830e761475df00d62b8cb29c3438
|
[
"MIT"
] | null | null | null |
pandasgui/widgets/json_viewer.py
|
felipeescallon/pandas-GUI
|
40327cd2763d830e761475df00d62b8cb29c3438
|
[
"MIT"
] | null | null | null |
import collections
import json
import sys
from typing import Union
from PyQt5 import QtCore, QtGui, QtWidgets
from pandasgui.utility import summarize_json
class JsonViewer(QtWidgets.QWidget):
def __init__(self, jdata: Union[list, dict], parent=None):
super().__init__(parent)
self.find_box = QtWidgets.QLineEdit()
self.find_box.returnPressed.connect(self.find)
self.find_box.textChanged.connect(self.find)
self.find_box.setPlaceholderText("Find")
self.tree_widget = QtWidgets.QTreeWidget()
self.tree_widget.setHeaderLabels(["Key", "Value"])
self.tree_widget.header().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
root_item = self.tree_widget.invisibleRootItem()
self.recurse_jdata(jdata, root_item)
self.tree_widget.addTopLevelItem(root_item)
self.tree_widget.expandAll()
self.expand_all = QtWidgets.QPushButton("Expand All")
self.expand_all.clicked.connect(self.tree_widget.expandAll)
self.collapse_all = QtWidgets.QPushButton("Collapse All")
self.collapse_all.clicked.connect(self.tree_widget.collapseAll)
top_section = QtWidgets.QHBoxLayout()
top_section.addWidget(self.find_box)
top_section.addWidget(self.expand_all)
top_section.addWidget(self.collapse_all)
main_view_layout = QtWidgets.QVBoxLayout()
main_view_layout.addLayout(top_section)
main_view_layout.addWidget(self.tree_widget)
main_view = QtWidgets.QWidget()
main_view.setLayout(main_view_layout)
summary_view = QtWidgets.QTextEdit()
summary_view.setReadOnly(True)
summary_view.setText(summarize_json(jdata))
font = QtGui.QFont("Monospace")
font.setStyleHint(QtGui.QFont.TypeWriter)
summary_view.setFont(font)
self.tabs = QtWidgets.QTabWidget()
self.tabs.addTab(main_view, "Viewer")
self.tabs.addTab(summary_view, "Structure")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.tabs)
self.setLayout(layout)
self.resize(QtCore.QSize(400, 500))
def find(self):
text = self.find_box.text()
if text == "":
self.tree_widget.clearSelection()
return
result = []
for col in [0, 1]:
result += self.tree_widget.findItems(text,
QtCore.Qt.MatchRegExp | QtCore.Qt.MatchRecursive,
col)
self.tree_widget.clearSelection()
self.tree_widget.setSelectionMode(self.tree_widget.MultiSelection)
for item in result:
item.setSelected(True)
self.tree_widget.setSelectionMode(self.tree_widget.ExtendedSelection)
def recurse_jdata(self, jdata, tree_widget):
if isinstance(jdata, dict):
items = jdata.items()
elif isinstance(jdata, list):
items = [(str(i), val) for i, val in enumerate(jdata)]
else:
raise ValueError(f"Expected dict or list, instead got {type(jdata)}")
        for key, val in items:
            if isinstance(val, (dict, list)):
                row_item = QtWidgets.QTreeWidgetItem([key])
                self.recurse_jdata(val, row_item)
            else:
                row_item = QtWidgets.QTreeWidgetItem([key, str(val)])
            tree_widget.addChild(row_item)
if "__main__" == __name__:
app = QtWidgets.QApplication([])
example1 = [{'apiVersion': 3,
'details': {'date': '2012-04-23T18:25:43.511Z', 'userCount': 3},
'users': [
{'id': 'jross',
'firstName': "Jeff",
'lastName': "Reeves",
'messages': [
{'content': "Hello",
'date_posted': "2012-04-23T18:25:43.511Z"},
{'content': "I finished the thing",
'date_posted': "2012-04-23T18:29:43.511Z"},
{'content': "Here it is",
'date_posted': "2012-04-23T18:30:43.511Z",
'error': "Failed to send message"},
]},
{'id': 'sbank',
'firstName': "Steve",
'lastName': "Banks",
'messages': [
{'content': "Hi",
'date_posted': "2012-04-23T18:26:43.511Z"},
]},
{'id': 'bscot',
'firstName': "Bob",
'messages': []},
]}]
json_viewer = JsonViewer(example1)
json_viewer.show()
import requests
examples = []
for url in ['https://jsonplaceholder.typicode.com/posts', # 100 posts
'https://jsonplaceholder.typicode.com/comments', # 500 comments
'https://jsonplaceholder.typicode.com/albums', # 100 albums
'https://jsonplaceholder.typicode.com/photos', # 5000 photos
'https://jsonplaceholder.typicode.com/todos', # 200 todos
'https://jsonplaceholder.typicode.com/users', # 10 users
]:
data = requests.get(url).json()
x = JsonViewer(data)
x.show()
examples.append(x)
sys.exit(app.exec_())
| 35.694268
| 98
| 0.560849
|
dd979129517e92efc183561c72a38acca4b943e4
| 9,068
|
py
|
Python
|
rl_coach/architectures/mxnet_components/heads/head.py
|
jl45621/coach
|
9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084
|
[
"Apache-2.0"
] | 1,960
|
2017-10-19T10:31:24.000Z
|
2020-11-07T18:19:23.000Z
|
rl_coach/architectures/mxnet_components/heads/head.py
|
jl45621/coach
|
9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084
|
[
"Apache-2.0"
] | 349
|
2017-10-21T17:17:18.000Z
|
2020-10-17T13:39:56.000Z
|
rl_coach/architectures/mxnet_components/heads/head.py
|
jl45621/coach
|
9a895a1ac73aff44b2e6eb8e4d01e8ec35ceb084
|
[
"Apache-2.0"
] | 428
|
2017-10-21T01:32:58.000Z
|
2020-11-07T13:49:49.000Z
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, List, Union, Tuple
import mxnet as mx
from mxnet.initializer import Initializer, register
from mxnet.gluon import nn, loss
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition
LOSS_OUT_TYPE_LOSS = 'loss'
LOSS_OUT_TYPE_REGULARIZATION = 'regularization'
@register
class NormalizedRSSInitializer(Initializer):
"""
Standardizes Root Sum of Squares along the input channel dimension.
    Used for Dense layer weight matrices only (i.e. do not use on Convolution kernels).
MXNet Dense layer weight matrix is of shape (out_ch, in_ch), so standardize across axis 1.
Root Sum of Squares set to `rss`, which is 1.0 by default.
Called `normalized_columns_initializer` in TensorFlow backend (but we work with rows instead of columns for MXNet).
"""
def __init__(self, rss=1.0):
super(NormalizedRSSInitializer, self).__init__(rss=rss)
self.rss = float(rss)
def _init_weight(self, name, arr):
mx.nd.random.normal(0, 1, out=arr)
sample_rss = arr.square().sum(axis=1).sqrt()
scalers = self.rss / sample_rss
arr *= scalers.expand_dims(1)
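# Editor's illustration (not part of the original file): a small helper that reproduces the
# row-wise normalization performed by NormalizedRSSInitializer above, so its effect can be
# inspected in isolation. It is never called at import time; the default shape is arbitrary.
def _rss_demo(rss=1.0, shape=(4, 3)):
    """Return the per-row root-sum-of-squares of a normalized random matrix;
    every entry of the result should equal `rss`."""
    w = mx.nd.random.normal(0, 1, shape=shape)                     # same draw as _init_weight
    w = w * (rss / w.square().sum(axis=1).sqrt()).expand_dims(1)   # rescale each row
    return w.square().sum(axis=1).sqrt()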
class LossInputSchema(object):
"""
Helper class to contain schema for loss hybrid_forward input
"""
def __init__(self, head_outputs: List[str], agent_inputs: List[str], targets: List[str]):
"""
:param head_outputs: list of argument names in hybrid_forward that are outputs of the head.
The order and number MUST MATCH the output from the head.
:param agent_inputs: list of argument names in hybrid_forward that are inputs from the agent.
The order and number MUST MATCH `output_<head_type_idx>_<order>` for this head.
:param targets: list of argument names in hybrid_forward that are targets for the loss.
The order and number MUST MATCH targets passed from the agent.
"""
self._head_outputs = head_outputs
self._agent_inputs = agent_inputs
self._targets = targets
@property
def head_outputs(self):
return self._head_outputs
@property
def agent_inputs(self):
return self._agent_inputs
@property
def targets(self):
return self._targets
class HeadLoss(loss.Loss):
"""
ABC for loss functions of each head. Child class must implement input_schema() and loss_forward()
"""
def __init__(self, *args, **kwargs):
super(HeadLoss, self).__init__(*args, **kwargs)
self._output_schema = None # type: List[str]
@property
def input_schema(self) -> LossInputSchema:
"""
:return: schema for input of hybrid_forward. Read docstring for LossInputSchema for details.
"""
raise NotImplementedError
@property
def output_schema(self) -> List[str]:
"""
:return: schema for output of hybrid_forward. Must contain 'loss' and 'regularization' keys at least once.
The order and total number must match that of returned values from the loss. 'loss' and 'regularization'
        are special keys. Any other string is treated as an auxiliary output and must match the auxiliary
fetch names returned by the head.
"""
return self._output_schema
def forward(self, *args):
"""
Override forward() so that number of outputs can be checked against the schema
"""
outputs = super(HeadLoss, self).forward(*args)
if isinstance(outputs, tuple) or isinstance(outputs, list):
num_outputs = len(outputs)
else:
assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol)
num_outputs = 1
        assert num_outputs == len(self.output_schema), "Number of outputs doesn't match schema ({} != {})".format(
num_outputs, len(self.output_schema))
return outputs
def _loss_output(self, outputs: List[Tuple[Union[NDArray, Symbol], str]]):
"""
Must be called on the output from hybrid_forward().
        Saves the returned type strings as the output schema and returns the output values as a tuple
        :return: tuple of output values
"""
output_schema = [o[1] for o in outputs]
assert self._output_schema is None or self._output_schema == output_schema
self._output_schema = output_schema
return tuple(o[0] for o in outputs)
def hybrid_forward(self, F, x, *args, **kwargs):
"""
        Passes the call to loss_forward() and constructs the output schema from its output by calling _loss_output()
"""
return self._loss_output(self.loss_forward(F, x, *args, **kwargs))
def loss_forward(self, F, x, *args, **kwargs) -> List[Tuple[Union[NDArray, Symbol], str]]:
"""
Similar to hybrid_forward, but returns list of (NDArray, type_str)
"""
raise NotImplementedError
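# Editor's sketch (not part of rl_coach): a minimal concrete HeadLoss showing the convention
# enforced above -- loss_forward() returns a list of (value, type_string) pairs, which
# _loss_output() records as the output schema. The schema names 'prediction' and 'target'
# are illustrative assumptions, not names used by the library.
class _ExampleMSEHeadLoss(HeadLoss):
    def __init__(self, weight=None, batch_axis=0, **kwargs):
        super(_ExampleMSEHeadLoss, self).__init__(weight, batch_axis, **kwargs)

    @property
    def input_schema(self) -> LossInputSchema:
        return LossInputSchema(head_outputs=['prediction'],
                               agent_inputs=[],
                               targets=['target'])

    def loss_forward(self, F, prediction, target):
        # Mean squared error tagged with the special 'loss' key, so forward()
        # sees exactly one output matching the one-entry output schema.
        mse = F.mean(F.square(prediction - target))
        return [(mse, LOSS_OUT_TYPE_LOSS)]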
class Head(nn.HybridBlock):
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition,
network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool=True,
activation_function: str='relu', dense_layer: None=None):
"""
A head is the final part of the network. It takes the embedding from the middleware embedder and passes it
through a neural network to produce the output of the network. There can be multiple heads in a network, and
each one has an assigned loss function. The heads are algorithm dependent.
:param agent_parameters: containing algorithm parameters such as clip_likelihood_ratio_using_epsilon
and beta_entropy.
:param spaces: containing action spaces used for defining size of network output.
:param network_name: name of head network. currently unused.
:param head_type_idx: index of head network. currently unused.
:param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
:param is_local: flag to denote if network is local. currently unused.
:param activation_function: activation function to use between layers. currently unused.
:param dense_layer: type of dense layer to use in network. currently unused.
"""
super(Head, self).__init__()
self.head_type_idx = head_type_idx
self.network_name = network_name
self.loss_weight = loss_weight
self.is_local = is_local
self.ap = agent_parameters
self.spaces = spaces
self.return_type = None
self.activation_function = activation_function
self.dense_layer = dense_layer
self._num_outputs = None
def loss(self) -> HeadLoss:
"""
Returns loss block to be used for specific head implementation.
:return: loss block (can be called as function) for outputs returned by the head network.
"""
raise NotImplementedError()
@property
def num_outputs(self):
""" Returns number of outputs that forward() call will return
:return:
"""
assert self._num_outputs is not None, 'must call forward() once to configure number of outputs'
return self._num_outputs
def forward(self, *args):
"""
Override forward() so that number of outputs can be automatically set
"""
outputs = super(Head, self).forward(*args)
if isinstance(outputs, tuple):
num_outputs = len(outputs)
else:
assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol)
num_outputs = 1
if self._num_outputs is None:
self._num_outputs = num_outputs
else:
assert self._num_outputs == num_outputs, 'Number of outputs cannot change ({} != {})'.format(
self._num_outputs, num_outputs)
assert self._num_outputs == len(self.loss().input_schema.head_outputs)
return outputs
def hybrid_forward(self, F, x, *args, **kwargs):
"""
Used for forward pass through head network.
:param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
:param x: middleware state representation, of shape (batch_size, in_channels).
:return: final output of network, that will be used in loss calculations.
"""
raise NotImplementedError()
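# Editor's sketch (not part of rl_coach): a minimal concrete Head wiring a single Dense layer
# initialized with the NormalizedRSSInitializer defined above, paired with the example loss
# sketched earlier. The single scalar output and the constructor defaults are illustrative
# assumptions rather than anything prescribed by the library.
class _ExampleValueHead(Head):
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition,
                 network_name: str, head_type_idx: int=0, loss_weight: float=1.,
                 is_local: bool=True, activation_function: str='relu',
                 dense_layer: None=None):
        super(_ExampleValueHead, self).__init__(agent_parameters, spaces, network_name,
                                                head_type_idx, loss_weight, is_local,
                                                activation_function, dense_layer)
        with self.name_scope():
            # One output unit whose weight rows start with unit root-sum-of-squares
            self.dense = nn.Dense(units=1, weight_initializer=NormalizedRSSInitializer(1.0))

    def loss(self) -> HeadLoss:
        return _ExampleMSEHeadLoss()

    def hybrid_forward(self, F, x, *args, **kwargs):
        # x: middleware embedding of shape (batch_size, in_channels)
        return self.dense(x)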
| 41.031674
| 119
| 0.672364
|
5b0484683b83109b3d359ba25e33c7cd0ac8296a
| 16,079
|
py
|
Python
|
log_mito/model_633.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito/model_633.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito/model_633.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 158250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
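# Editor's sketch (not part of the original model file): one way to simulate this model using
# pysb's bundled ODE integrator. The time span and the observable printed are illustrative
# choices, not part of the model itself.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 101)              # arbitrary horizon, in seconds
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    print(result.observables['ParpC_obs'][-1])      # cleaved PARP at the final time point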
| 87.863388
| 710
| 0.80347
|
1b7f823746f28cc5a29e5681813c41480bbf6eb6
| 37,533
|
py
|
Python
|
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/Editra.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/Editra.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/Editra.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# Name: Editra.py #
# Purpose: Implements Editras App object and the Main method #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
This module defines the Editra Application object and the Main method for
running Editra.
@summary: Editra's main application object and MainLoop
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: Editra.py 60549 2009-05-08 01:49:32Z CJP $"
__revision__ = "$Revision: 60549 $"
#--------------------------------------------------------------------------#
# Dependencies
import os
import sys
import base64
import locale
import time
import getopt
import shutil
import wx
# The event handler mixin is now part of wxPython proper, but there hasn't
# been an official release with it yet, so try to import the official module
# but fallback to our own copy if it fails.
try:
import wx.lib.eventStack as events
except:
import extern.events as events
# Due to some methods that were added in 2.8.3 being used in a large number
# of places Editra has become incompatible with wxPython 2.8.1.1 and earlier.
# TODO: add a message dialog to display this, or look into the cause of the
# issues of using wxversion on windows.
if not hasattr(sys, 'frozen') and wx.VERSION < (2, 8, 3, ''):
print "VersionError: Editra requires wxPython 2.8.3 or higher"
print " Your version is %s" % wx.VERSION_STRING
# Try and import a system installed version of pkg_resources else fallback to
# the one bundled with Editra's source.
try:
from pkg_resources import resource_filename
except ImportError:
from extern.pkg_resources import resource_filename
# Editra Libraries
import ed_glob
import ed_i18n
import profiler
import util
import dev_tool
import ed_main
import ed_art
import ed_txt
import ed_event
import updater
import plugin
import ed_ipc
import ed_msg
#--------------------------------------------------------------------------#
# Global Variables
ID_UPDATE_CHECK = wx.NewId()
# Commands (here temporarily)
APP_CMD_OPEN_WINDOW = u"Editra.OpenWindow"
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class Editra(wx.App, events.AppEventHandlerMixin):
"""The Editra Application Object
@deprecated: L{GetMainWindow}
"""
def __init__(self, *args, **kargs):
"""Initialize that main app and its attributes
@postcondition: application is created and ready to be run in mainloop
"""
wx.App.__init__(self, *args, **kargs)
events.AppEventHandlerMixin.__init__(self)
# Attributes
self._log = dev_tool.DEBUGP
self._lock = False
self._windows = dict()
# Disable debug popups
wx.Log.EnableLogging(False)
if ed_glob.SINGLE:
# Setup the instance checker
instance_name = u"%s-%s" % (self.GetAppName(), wx.GetUserId())
self._instance = wx.SingleInstanceChecker(instance_name)
if self._instance.IsAnotherRunning():
try:
opts, args = getopt.getopt(sys.argv[1:], "dhv",
['debug', 'help', 'version'])
except getopt.GetoptError, msg:
self._log("[app][err] %s" % str(msg))
args = list()
if not len(args):
args.append(APP_CMD_OPEN_WINDOW)
rval = ed_ipc.SendCommands(args, profiler.Profile_Get('SESSION_KEY'))
# If sending the command failed then let the editor startup
# a new instance
if not rval:
self._isfirst = True
else:
self._log("[app][info] Starting Ipc server...")
# Set the session key and save it to the users profile so
# that other instances can access the server
key = unicode(base64.b64encode(os.urandom(8), 'zZ'))
key = wx.GetUserName() + key
profiler.Profile_Set('SESSION_KEY', key)
profiler.Profile_Set('ISBINARY', hasattr(sys, 'frozen'))
path = profiler.Profile_Get('MYPROFILE')
profiler.TheProfile.Write(path)
try:
self._server = ed_ipc.EdIpcServer(self, profiler.Profile_Get('SESSION_KEY'))
self._server.start()
except Exception, msg:
self._log("[app][err] Failed to start ipc server")
self._log("[app][err] %s" % str(msg))
self._server = None
self._isfirst = True
else:
self._isfirst = True
# Setup Plugins after locale as they may have resource that need to
# be loaded.
if self._isfirst:
self._pluginmgr = plugin.PluginManager()
self._log("[app][info] Registering Editra's ArtProvider")
wx.ArtProvider.PushProvider(ed_art.EditraArt())
def AddMessageCatalog(self, name, path):
"""Add a catalog lookup path to the app
@param name: name of catalog (i.e 'projects')
@param path: catalog lookup path
"""
if self.locale is not None:
path = resource_filename(path, 'locale')
self.locale.AddCatalogLookupPathPrefix(path)
self.locale.AddCatalog(name)
def OnInit(self):
"""Initialize the Editor
@note: this gets called before __init__
@postcondition: custom artprovider and plugins are loaded
"""
self.SetAppName(ed_glob.PROG_NAME)
self._log = dev_tool.DEBUGP
self._log("[app][info] Editra is Initializing")
# Load user preferences
self.profile_updated = InitConfig()
self._isfirst = False # Is the first instance
self._instance = None
# Setup Locale
locale.setlocale(locale.LC_ALL, '')
self.locale = wx.Locale(ed_i18n.GetLangId(profiler.Profile_Get('LANG')))
if self.locale.GetCanonicalName() in ed_i18n.GetAvailLocales():
self.locale.AddCatalogLookupPathPrefix(ed_glob.CONFIG['LANG_DIR'])
self.locale.AddCatalog(ed_glob.PROG_NAME)
else:
del self.locale
self.locale = None
# Check and set encoding if necessary
if not profiler.Profile_Get('ENCODING'):
profiler.Profile_Set('ENCODING', locale.getpreferredencoding())
# Setup the Error Reporter
if profiler.Profile_Get('REPORTER', 'bool', True):
sys.excepthook = dev_tool.ExceptionHook
#---- Bind Events ----#
self.Bind(wx.EVT_ACTIVATE_APP, self.OnActivate)
self.Bind(wx.EVT_MENU, self.OnNewWindow, id=ed_glob.ID_NEW_WINDOW)
self.Bind(wx.EVT_MENU, self.OnCloseWindow)
self.Bind(ed_event.EVT_NOTIFY, self.OnNotify)
self.Bind(ed_ipc.EVT_COMMAND_RECV, self.OnCommandRecieved)
# Splash a warning if version is not a final version
if profiler.Profile_Get('APPSPLASH'):
import edimage
splash_img = edimage.splashwarn.GetBitmap()
self.splash = wx.SplashScreen(splash_img, wx.SPLASH_CENTRE_ON_PARENT | \
wx.SPLASH_NO_TIMEOUT, 0, None, wx.ID_ANY)
self.splash.Show()
return True
def Destroy(self):
"""Destroy the application"""
try:
# Cleanup the instance checker
del self._instance
except AttributeError:
pass
wx.App.Destroy(self)
def DestroySplash(self):
"""Destroy the splash screen"""
# If is created and not dead already
if getattr(self, 'splash', None) is not None and \
isinstance(self.splash, wx.SplashScreen):
self.splash.Destroy()
self.splash = None
def Exit(self, force=False):
"""Exit the program
@postcondition: If no toplevel windows are present program will exit.
@postcondition: Program may remain open if an open window is locking.
"""
self._pluginmgr.WritePluginConfig()
profiler.TheProfile.Write(profiler.Profile_Get('MYPROFILE'))
if not self._lock or force:
if hasattr(self, 'server'):
self.server.ShutDown()
try:
# Cleanup the instance checker
del self._instance
except AttributeError:
pass
# Exit the app
wx.App.ExitMainLoop(self)
def GetLocaleObject(self):
"""Get the locale object owned by this app. Use this method to add
extra catalogs for lookup.
@return: wx.Locale or None
"""
return self.locale
def GetLog(self):
"""Returns the logging function used by the app
@return: the logging function of this program instance
"""
return self._log
def GetMainWindow(self):
"""Returns reference to the instance of the MainWindow
that is running if available, and None if not.
@return: the L{MainWindow} of this app if it is open
"""
self._log("[app][warn] Editra::GetMainWindow is deprecated")
for window in self._windows:
if not hasattr(self._windows[window][0], '__name__'):
continue
if self._windows[window][0].__name__ == "MainWindow":
return self._windows[window][0]
return None
def GetActiveWindow(self):
"""Returns the active main window if there is one else it will
just return some main window or none if there are no main windows
@return: frame instance or None
"""
awin = None
for win in self.GetMainWindows():
if win.IsActive():
awin = win
break
if awin is None:
awin = self.GetTopWindow()
if awin is None or getattr(awin, '__name__', '?') != "MainWindow":
if len(self.GetMainWindows()):
awin = self.GetMainWindows()[0]
return awin
def GetCurrentBuffer(self):
"""Get the current buffer from the active window or None
@return: EditraStc
"""
win = self.GetTopWindow()
if getattr(win, '__name__', None) != u"MainWindow":
win = self.GetActiveWindow()
if win is None:
return win
return win.GetNotebook().GetCurrentCtrl()
def GetMainWindows(self):
"""Returns a list of all open main windows
@return: list of L{MainWindow} instances of this app (list may be empty)
"""
mainw = list()
for window in self._windows:
try:
if self._windows[window][0].__name__ == "MainWindow":
mainw.append(self._windows[window][0])
except AttributeError:
continue
return mainw
def GetOpenWindows(self):
"""Returns a list of open windows
@return: list of all open windows owned by app
"""
return self._windows
def GetPluginManager(self):
"""Returns the plugin manager used by this application
@return: Apps plugin manager
@see: L{plugin}
"""
return self._pluginmgr
def GetProfileUpdated(self):
"""Was the profile updated
@return: bool
"""
return self.profile_updated
def GetWindowInstance(self, wintype):
"""Get an instance of an open window if one exists
@param wintype: Class type of window to look for
@precondition: Window must have called L{RegisterWindow}
@return: Instance of window or None
"""
for win in self._windows:
if isinstance(self._windows[win][0], wintype):
return self._windows[win][0]
return None
def IsLocked(self):
"""Returns whether the application is locked or not
@return: whether a window has locked the app from closing or not
"""
return self._lock
def IsOnlyInstance(self):
"""Check if this app is the the first instance that is running
@return: bool
"""
return self._isfirst
def Lock(self):
"""Locks the app from exiting
@postcondition: program is locked from exiting
"""
self._lock = True
def MacNewFile(self):
"""Stub for future use"""
pass
def MacOpenFile(self, filename):
"""Macintosh Specific code for opening files that are associated
with the editor and double clicked on after the editor is already
running.
@param filename: file path string
@postcondition: if L{MainWindow} is open file will be opened in notebook
"""
window = self.GetTopWindow()
if getattr(window, '__name__', '') == "MainWindow":
try:
self._log("[app][info] MacOpenFile Fired")
encoding = sys.getfilesystemencoding()
window.DoOpen(ed_glob.ID_COMMAND_LINE_OPEN,
ed_txt.DecodeString(filename, encoding))
# Make sure the window is brought to the front
if window.IsIconized():
window.Iconize(False)
window.Raise()
except Exception, msg:
self._log("[app][err] Failed to open drop file: %s" % str(msg))
pass
else:
pass
def MacPrintFile(self, filename):
"""Stub for future use
@param filename: file to print
"""
pass
def MacReopenApp(self):
"""Handle kAEReopenApplication when dock icons is clicked on"""
frame = self.GetTopWindow()
if frame is not None:
if frame.IsIconized():
frame.Iconize(False)
frame.Raise()
def OnActivate(self, evt):
"""Activation Event Handler
@param evt: event that called this handler
@type evt: wx.ActivateEvent
"""
if evt.GetActive():
self._log("[app][info] I'm Awake!!")
# frame = self.GetTopWindow()
# if frame is not None:
# if frame.IsIconized():
# frame.Iconize(False)
# frame.Raise()
else:
self._log("[app][info] Going to sleep")
evt.Skip()
def OnExit(self, evt=None, force=False):
"""Handle application exit request
@param evt: event that called this handler
"""
e_id = -1
if evt:
e_id = evt.GetId()
if e_id == ed_glob.ID_EXIT:
# First loop is to ensure current top window is
# closed first
for win in self.GetMainWindows():
if win.IsActive():
result = win.Close()
if result:
break
return
for win in self.GetMainWindows():
win.Raise()
result = win.Close()
if not result:
break
self.Exit(force)
else:
if evt:
evt.Skip()
def OnNewWindow(self, evt):
"""Create a new editing window
@param evt: wx.EVT_MENU
"""
if evt.GetId() == ed_glob.ID_NEW_WINDOW:
frame = evt.GetEventObject().GetMenuBar().GetFrame()
self.OpenNewWindow(caller=frame)
else:
evt.Skip()
def OnCommandRecieved(self, evt):
"""Recieve commands from the IPC server
@todo: move command processing into own module
"""
cmds = evt.GetCommands()
for cmdstr in cmds:
if u"::" in cmdstr:
target, cmd = cmdstr.split(u"::")
if target == u"Cmd.EditraStc":
cbuf = self.GetCurrentBuffer()
if cbuf is not None and hasattr(cbuf, cmd):
try:
getattr(cbuf, cmd)()
except:
self._log("[app][err] Invalid Command %s" % cmdstr)
else:
if cmdstr == APP_CMD_OPEN_WINDOW:
self.OpenNewWindow()
elif len(cmdstr):
self.MacOpenFile(cmdstr)
else:
self._log("[app][warn] Unknown Command %s" % cmdstr)
def OnCloseWindow(self, evt):
"""Close the currently active window
@param evt: wx.MenuEvent
"""
if evt.GetId() in [ed_glob.ID_CLOSE, ed_glob.ID_CLOSE_WINDOW]:
for window in wx.GetTopLevelWindows():
if hasattr(window, 'IsActive') and window.IsActive():
if hasattr(window, 'Close'):
window.Close()
break
else:
evt.Skip()
def OpenNewWindow(self, fname=u'', caller=None):
"""Open a new window
@keyword fname: Open a file in the new window
@return: the new window
"""
frame = ed_main.MainWindow(None, wx.ID_ANY,
profiler.Profile_Get('WSIZE'),
ed_glob.PROG_NAME)
if caller:
pos = caller.GetPosition()
frame.SetPosition((pos.x + 22, pos.y + 22))
self.RegisterWindow(repr(frame), frame, True)
self.SetTopWindow(frame)
if isinstance(fname, basestring) and fname != u'':
frame.DoOpen(ed_glob.ID_COMMAND_LINE_OPEN, fname)
frame.Show(True)
# Ensure frame gets an Activate event when shown
# this doesn't happen automatically on windows
if wx.Platform == '__WXMSW__':
wx.PostEvent(frame, wx.ActivateEvent(wx.wxEVT_ACTIVATE, True))
return frame
def OnNotify(self, evt):
"""Handle notification events
@param evt: L{ed_event.NotificationEvent}
"""
e_val = evt.GetValue()
if evt.GetId() == ID_UPDATE_CHECK and \
isinstance(e_val, tuple) and e_val[0]:
self.DestroySplash()
mdlg = wx.MessageDialog(self.GetActiveWindow(),
_("An updated version of Editra is available\n"
"Would you like to download Editra %s now?") %\
e_val[1], _("Update Available"),
wx.YES_NO|wx.YES_DEFAULT|wx.CENTER|wx.ICON_INFORMATION)
if mdlg.ShowModal() == wx.ID_YES:
dl_dlg = updater.DownloadDialog(None, wx.ID_ANY,
_("Downloading Update"))
dp_sz = wx.GetDisplaySize()
dl_dlg.SetPosition(((dp_sz[0] - (dl_dlg.GetSize()[0] + 5)), 25))
dl_dlg.Show()
mdlg.Destroy()
else:
evt.Skip()
def RegisterWindow(self, name, window, can_lock=False):
"""Registers winows with the app. The name should be the
repr of window. The can_lock parameter is a boolean stating
whether the window can keep the main app running after the
main frame has exited.
@param name: name of window
@param window: reference to window object
@keyword can_lock: whether window can lock exit or not
"""
self._windows[name] = (window, can_lock)
def ReloadArtProvider(self):
"""Reloads the custom art provider onto the artprovider stack
@postcondition: artprovider is removed and reloaded
"""
try:
wx.ArtProvider.PopProvider()
finally:
wx.ArtProvider.PushProvider(ed_art.EditraArt())
def UnLock(self):
"""Unlocks the application
@postcondition: application is unlocked so it can exit
"""
self._lock = False
def UnRegisterWindow(self, name):
"""Unregisters a named window with the app if the window
was the top window and if other windows that can lock are
registered in the window stack it will promote the next one
it finds to be the top window. If no windows that fit this
criteria are found it will close the application.
@param name: name of window to unregister
"""
if name in self._windows:
self._windows.pop(name)
cur_top = self.GetTopWindow()
if not len(self._windows):
self._log("[app][info] No more open windows shutting down")
self.Exit()
return
if name == repr(cur_top):
found = False
for key in self._windows:
if self._windows[key][1]:
self._log("[app][info] Promoting %s to top" % key)
try:
self.SetTopWindow(self._windows[key][0])
except Exception:
continue
found = True
break
if not found:
self._log("[app][info] No more top windows exiting app")
self.UnLock()
self.Exit()
else:
self._log("[app][info] UnRegistered %s" % name)
else:
self._log("[app][warn] The window %s is not registered" % name)
def WindowCanLock(self, winname):
"""Checks if a named window can lock the application or
not. The window must have been previously registered with
a call to RegisterWindow for this function to have any
        real usefulness.
@param winname: name of window to query
"""
if winname in self._windows:
return self._windows[winname][1]
else:
self._log("[app][warn] the window %s has "
"not been registered" % winname)
return False
#--------------------------------------------------------------------------#
def InitConfig():
"""Initializes the configuration data
@postcondition: all configuration data is set
"""
# Check if a custom config directory was specified on the commandline
if ed_glob.CONFIG['CONFIG_BASE'] is not None:
        # TODO: there is a bug the first time the config is created
        #       where the settings will not be saved until the second launch.
config_base = os.path.abspath(ed_glob.CONFIG['CONFIG_BASE'])
else:
# Look for a profile directory on the system level. If this directory
        # exists, use it instead of the user one. This will allow for running
# Editra from a portable drive or for system administrators to enforce
# settings on a system installed version.
config_base = util.ResolvConfigDir(u'.Editra', True)
if os.path.exists(config_base):
ed_glob.CONFIG['CONFIG_BASE'] = config_base
ed_glob.CONFIG['PROFILE_DIR'] = os.path.join(config_base, u"profiles")
ed_glob.CONFIG['PROFILE_DIR'] += os.sep
else:
config_base = wx.StandardPaths.Get().GetUserDataDir()
ed_glob.CONFIG['PROFILE_DIR'] = util.ResolvConfigDir(u"profiles")
# Check for if config directory exists and if profile is from the current
# running version of Editra.
profile_updated = False
if util.HasConfigDir() and os.path.exists(ed_glob.CONFIG['PROFILE_DIR']):
if profiler.ProfileIsCurrent():
pstr = profiler.GetProfileStr()
pstr = util.RepairConfigState(pstr)
profiler.TheProfile.Load(pstr)
else:
dev_tool.DEBUGP("[InitConfig][info] Updating Profile to current version")
# Load and update profile
pstr = profiler.GetProfileStr()
pstr = util.RepairConfigState(pstr)
profiler.TheProfile.Load(pstr)
profiler.TheProfile.Update()
#---- Temporary Profile Adaptions ----#
# GUI_DEBUG mode removed in 0.2.5
mode = profiler.Profile_Get('MODE')
if mode == 'GUI_DEBUG':
profiler.Profile_Set('MODE', 'DEBUG')
# This key has been removed so clean it from old profiles
profiler.Profile_Del('LASTCHECK')
# Print modes don't use strings anymore
if isinstance(profiler.Profile_Get('PRINT_MODE'), basestring):
profiler.Profile_Set('PRINT_MODE', ed_glob.PRINT_BLACK_WHITE)
# Simplifications to eol mode persistance (0.4.28)
# Keep for now till plugins are updated
#profiler.Profile_Del('EOL') # changed to EOL_MODE
# After 0.4.65 LAST_SESSION now points a session file and not
# to a list of files to open.
sess = profiler.Profile_Get('LAST_SESSION')
if isinstance(sess, list):
profiler.Profile_Set('LAST_SESSION', u'')
#---- End Temporary Profile Adaptions ----#
# Write out updated profile
profiler.TheProfile.Write(pstr)
# When upgrading from an older version make sure all
# config directories are available.
for cfg in ("cache", "styles", "plugins", "profiles", "sessions"):
if not util.HasConfigDir(cfg):
util.MakeConfigDir(cfg)
profile_updated = True
else:
# Fresh install
util.CreateConfigDir()
# Check and upgrade installs from old location
success = True
try:
success = UpgradeOldInstall()
except Exception, msg:
dev_tool.DEBUGP("[InitConfig][err] %s" % msg)
success = False
if not success:
old_cdir = u"%s%s.%s%s" % (wx.GetHomeDir(), os.sep,
ed_glob.PROG_NAME, os.sep)
msg = ("Failed to upgrade your old installation\n"
"To retain your old settings you may need to copy some files:\n"
"\nFrom: %s\n\nTo: %s") % (old_cdir, config_base)
wx.MessageBox(msg, "Upgrade Failed", style=wx.ICON_WARNING|wx.OK)
# Set default eol for windows
if wx.Platform == '__WXMSW__':
profiler.Profile_Set('EOL_MODE', ed_glob.EOL_MODE_CRLF)
profiler.Profile_Set('ICONSZ', (16, 16))
#---- Profile Loaded / Installed ----#
# Set debug mode
emode = profiler.Profile_Get('MODE')
if 'DEBUG' in emode:
ed_glob.DEBUG = True
if emode.startswith('VERBOSE'):
ed_glob.VDEBUG = True
# Resolve resource locations
ed_glob.CONFIG['CONFIG_DIR'] = util.ResolvConfigDir(u"")
ed_glob.CONFIG['INSTALL_DIR'] = util.ResolvConfigDir(u"", True)
ed_glob.CONFIG['KEYPROF_DIR'] = util.ResolvConfigDir(u"ekeys", True)
ed_glob.CONFIG['SYSPIX_DIR'] = util.ResolvConfigDir(u"pixmaps", True)
ed_glob.CONFIG['PLUGIN_DIR'] = util.ResolvConfigDir(u"plugins")
ed_glob.CONFIG['THEME_DIR'] = util.ResolvConfigDir(os.path.join(u"pixmaps", u"theme"))
ed_glob.CONFIG['LANG_DIR'] = util.ResolvConfigDir(u"locale", True)
ed_glob.CONFIG['STYLES_DIR'] = util.ResolvConfigDir(u"styles")
ed_glob.CONFIG['SYS_PLUGIN_DIR'] = util.ResolvConfigDir(u"plugins", True)
ed_glob.CONFIG['SYS_STYLES_DIR'] = util.ResolvConfigDir(u"styles", True)
ed_glob.CONFIG['TEST_DIR'] = util.ResolvConfigDir(os.path.join(u"tests", u"syntax"), True)
# Make sure all standard config directories are there
for cfg in ("cache", "styles", "plugins", "profiles", "sessions"):
if not util.HasConfigDir(cfg):
util.MakeConfigDir(cfg)
ed_glob.CONFIG['CACHE_DIR'] = util.ResolvConfigDir(u"cache")
ed_glob.CONFIG['SESSION_DIR'] = util.ResolvConfigDir(u"sessions")
return profile_updated
#--------------------------------------------------------------------------#
def UpgradeOldInstall():
"""Upgrade an old installation and transfer all files if they exist
@note: FOR INTERNAL USE ONLY
@return: bool (True if success, False if failure)
"""
old_cdir = u"%s%s.%s%s" % (wx.GetHomeDir(), os.sep,
ed_glob.PROG_NAME, os.sep)
base = ed_glob.CONFIG['CONFIG_BASE']
if base is None:
base = wx.StandardPaths.Get().GetUserDataDir() + os.sep
err = 0
if os.path.exists(old_cdir) and \
base.lower().rstrip(os.sep) != old_cdir.lower().rstrip(os.sep):
for item in os.listdir(old_cdir):
try:
dest = os.path.join(base, item)
item = os.path.join(old_cdir, item)
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest, True)
else:
os.remove(dest)
shutil.move(item, dest)
except Exception, msg:
util.Log("[Upgrade][err] %s" % msg)
err += 1
continue
os.rmdir(old_cdir)
# Load the copied over profile
pstr = profiler.GetProfileStr()
prof = os.path.basename(pstr)
pstr = os.path.join(base, u"profiles", prof)
if os.path.exists(pstr):
profiler.TheProfile.Load(pstr)
profiler.TheProfile.Update()
profiler.UpdateProfileLoader()
if not err:
wx.MessageBox(_("Your profile has been updated to the latest "
"version") + u"\n" + \
_("Please check the preferences dialog to check "
"your preferences"),
_("Profile Updated"))
return not err
#--------------------------------------------------------------------------#
def PrintHelp():
"""Print command line help
@postcondition: Help is printed and program exits
"""
print ("Editra - %s - Developers Text Editor\n"
"Cody Precord (2005-2009)\n\n"
"usage: Editra [arguments] [files... ]\n\n"
"Short Arguments:\n"
" -c Set custom configuration directory at runtime\n"
" -d Turn on console debugging (-dd for verbose debug)\n"
" -D Turn off console debugging (overrides preferences)\n"
" -h Show this help message\n"
" -p Run Editra in the profiler (outputs to editra.prof).\n"
" -v Print version number and exit\n"
" -S Disable single instance checker\n"
"\nLong Arguments:\n"
" --confdir arg Set custom configuration directory at runtime\n"
" --debug Turn on console debugging\n"
" --help Show this help message\n"
" --auth Print the ipc server info\n"
" --version Print version number and exit\n"
" --profileOut arg Run Editra in the profier (arg is output file)\n"
) % ed_glob.VERSION
os._exit(0)
#--------------------------------------------------------------------------#
def ProcessCommandLine():
"""Process the command line switches
@return: tuple ({switches,}, [args,])
"""
try:
items, args = getopt.getopt(sys.argv[1:], "dhpvDSc",
['debug', 'help', 'version', 'auth',
'configdir=', 'profileOut='])
except getopt.GetoptError, msg:
# Raise error to console and exit
sys.stderr.write(str(msg) + os.linesep)
PrintHelp()
# Process command line options
opts = dict(items)
for opt, value in dict(opts).items():
if opt in ['-h', '--help']:
PrintHelp()
elif opt in ['-v', '--version']:
print ed_glob.VERSION
os._exit(0)
elif opt in ['-d', '--debug'] and '-D' not in opts.keys():
# If the debug flag is set more than once go into verbose mode
if ed_glob.DEBUG:
ed_glob.VDEBUG = True
ed_glob.DEBUG = True
opts.pop(opt)
elif opt == '-D':
ed_glob.DEBUG = False
ed_glob.VDEBUG = False
opts.pop('-D')
elif opt == '-S':
# Disable single instance checker
ed_glob.SINGLE = False
opts.pop(opt)
elif opt in ['-c', '--configdir']:
ed_glob.CONFIG['CONFIG_BASE'] = value
opts.pop(opt)
elif opt == '--profileOut':
opts['-p'] = value
opts.pop('--profileOut')
else:
pass
# Return any unprocessed arguments
return opts, args
#--------------------------------------------------------------------------#
def Main():
"""Configures and Runs an instance of Editra
@summary: Parses command line options, loads the user profile, creates
an instance of Editra and starts the main loop.
"""
opts, args = ProcessCommandLine()
if '-p' in opts:
p_file = opts['-p']
opts.pop('-p')
if not len(p_file):
# Fall back to default output file
p_file = "editra.prof"
import hotshot
prof = hotshot.Profile(p_file)
prof.runcall(_Main, opts, args)
prof.close()
else:
_Main(opts, args)
def _Main(opts, args):
"""Main method
@param opts: Commandline options
@param args: Commandline arguments
"""
    # Put extern subpackage on path so that bundled external dependencies
# can be found if needed.
if not hasattr(sys, 'frozen'):
epath = os.path.join(os.path.dirname(__file__), 'extern')
if os.path.exists(epath):
sys.path.append(epath)
# Create Application
dev_tool.DEBUGP("[main][app] Initializing application...")
editra_app = Editra(False)
# Print ipc server authentication info
if '--auth' in opts:
opts.pop('--auth')
print "port=%d,key=%s" % (ed_ipc.EDPORT,
profiler.Profile_Get('SESSION_KEY'))
# Check if this is the only instance, if its not exit since
# any of the opening commands have already been passed to the
# master instance
if not editra_app.IsOnlyInstance():
dev_tool.DEBUGP("[main][info] Second instance exiting...")
editra_app.Destroy()
os._exit(0)
# Set the timeout on destroying the splash screen
wx.CallLater(2300, editra_app.DestroySplash)
if profiler.Profile_Get('SET_WSIZE'):
wsize = profiler.Profile_Get('WSIZE')
else:
wsize = (700, 450)
frame = ed_main.MainWindow(None, wx.ID_ANY, wsize, ed_glob.PROG_NAME)
frame.Maximize(profiler.Profile_Get('MAXIMIZED'))
editra_app.RegisterWindow(repr(frame), frame, True)
editra_app.SetTopWindow(frame)
frame.Show(True)
# Load Session Data
# But not if there are command line args for files to open
if profiler.Profile_Get('SAVE_SESSION', 'bool', False) and not len(args):
session = profiler.Profile_Get('LAST_SESSION', default=u'')
if isinstance(session, list):
# Check for format conversion from previous versions
profiler.Profile_Set('LAST_SESSION', u'')
else:
frame.GetNotebook().LoadSessionFile(session)
# Unlike wxMac/wxGTK Windows doesn't post an activate event when a window
# is first shown, so do it manually to make sure all event handlers get
# pushed.
if wx.Platform == '__WXMSW__':
wx.PostEvent(frame, wx.ActivateEvent(wx.wxEVT_ACTIVATE, True))
# Do update check, only check if its been more than a day since the last
# check
isadmin = os.access(ed_glob.CONFIG['INSTALL_DIR'], os.R_OK|os.W_OK)
if isadmin and profiler.Profile_Get('CHECKUPDATE', default=True):
uthread = updater.UpdateThread(editra_app, ID_UPDATE_CHECK)
uthread.start()
for arg in args:
try:
arg = os.path.abspath(arg)
fname = ed_txt.DecodeString(arg, sys.getfilesystemencoding())
frame.DoOpen(ed_glob.ID_COMMAND_LINE_OPEN, fname)
except IndexError:
dev_tool.DEBUGP("[main][err] IndexError on commandline args")
# Notify that profile was updated
if editra_app.GetProfileUpdated():
editra_app.DestroySplash()
        # Make sure window initializes to default position
profiler.Profile_Del('WPOS')
wx.MessageBox(_("Your profile has been updated to the latest "
"version") + u"\n" + \
_("Please check the preferences dialog to check "
"your preferences"),
_("Profile Updated"))
# 3. Start Applications Main Loop
dev_tool.DEBUGP("[main][info] Starting MainLoop...")
wx.CallAfter(frame.Raise)
editra_app.MainLoop()
dev_tool.DEBUGP("[main][info] MainLoop finished exiting application")
os._exit(0)
#-----------------------------------------------------------------------------#
if __name__ == '__main__':
Main()
| 36.020154
| 96
| 0.562945
|
b75373d07fd75c9cd90e27b982cd9dcba100cbc5
| 2,902
|
py
|
Python
|
dump.py
|
TimSC/osm2pgcopy
|
9276427d4042673fa35d626c80224203148c7c7d
|
[
"MIT"
] | null | null | null |
dump.py
|
TimSC/osm2pgcopy
|
9276427d4042673fa35d626c80224203148c7c7d
|
[
"MIT"
] | null | null | null |
dump.py
|
TimSC/osm2pgcopy
|
9276427d4042673fa35d626c80224203148c7c7d
|
[
"MIT"
] | null | null | null |
from pyo5m import o5m
import gzip, json, config, datetime, time
import psycopg2, psycopg2.extras, psycopg2.extensions #apt install python-psycopg2
if __name__=="__main__":
conn = psycopg2.connect("dbname='{0}' user='{1}' host='{2}' password='{3}'".format(config.dbname, config.dbuser, config.dbhost, config.dbpass))
#left,bottom,right,top
bbox = [-180.0, -90.0, 180.0, 90.0]
fi = gzip.open("out.o5m.gz", "wb")
enc = o5m.O5mEncode(fi)
enc.StoreIsDiff(False)
enc.StoreBounds(bbox)
#Get nodes
count = 0
lastUpdateTime = time.time()
lastUpdateCount = 0
query = ("SELECT *, ST_X(geom) as lon, ST_Y(geom) AS lat FROM {0}nodes".format(config.dbtableprefix) +
" WHERE visible=true and current=true;")
cur = conn.cursor('node-cursor', cursor_factory=psycopg2.extras.DictCursor)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
	cur.execute(query) #query has no placeholders; bbox is only written via StoreBounds above
for row in cur:
count+= 1
if count % 1000000 == 0:
print count, "nodes"
nid = row["id"]
metaData = (row["version"], datetime.datetime.fromtimestamp(row["timestamp"]),
row["changeset"], row["uid"], row["username"], row["visible"])
enc.StoreNode(nid, metaData, row["tags"], (row["lat"], row["lon"]))
timeNow = time.time()
if timeNow - lastUpdateTime > 1.0:
print count - lastUpdateCount, "nodes/sec"
lastUpdateCount = count
lastUpdateTime = timeNow
cur.close()
print "num nodes", count
enc.Reset()
#Get ways
query = ("SELECT * FROM {0}ways".format(config.dbtableprefix) +
" WHERE visible=true and current=true;")
cur = conn.cursor('way-cursor', cursor_factory=psycopg2.extras.DictCursor)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
cur.execute(query)
count = 0
for row in cur:
count += 1
if count % 1000000 == 0:
print count, "ways"
wid = row["id"]
metaData = (row["version"], datetime.datetime.fromtimestamp(row["timestamp"]),
row["changeset"], row["uid"], row["username"], row["visible"])
enc.StoreWay(wid, metaData, row["tags"], row["members"])
cur.close()
print "num ways", count
enc.Reset()
#Get relations
query = ("SELECT * FROM {0}relations".format(config.dbtableprefix) +
" WHERE visible=true and current=true;")
cur = conn.cursor('relation-cursor', cursor_factory=psycopg2.extras.DictCursor)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
cur.execute(query)
count = 0
for row in cur:
count += 1
if count % 1000000 == 0:
print count, "relations"
rid = row["id"]
mems = []
for (memTy, memId), memRole in zip(row["members"], row["memberroles"]):
mems.append((memTy, memId, memRole))
metaData = (row["version"], datetime.datetime.fromtimestamp(row["timestamp"]),
row["changeset"], row["uid"], row["username"], row["visible"])
enc.StoreRelation(rid, metaData, row["tags"], mems)
cur.close()
print "num relatons", count
enc.Finish()
fi.close()
print "All done"
| 30.87234
| 144
| 0.688491
|
3d962f18ecb0de664d1a6d7a82d13509d6db88a3
| 781
|
py
|
Python
|
machine-learning/image-transformation/scaling.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/image-transformation/scaling.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/image-transformation/scaling.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import matplotlib.pyplot as plt
# read the input image
img = cv2.imread("city.jpg")
# convert from BGR to RGB so we can plot using matplotlib
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# disable x & y axis
plt.axis('off')
# show the image
plt.imshow(img)
plt.show()
# get the image shape
rows, cols, dim = img.shape
# transformation matrix for scaling
M = np.float32([[1.5, 0 , 0],
[0, 1.8, 0],
[0, 0, 1]])
# apply a perspective transformation to the image
scaled_img = cv2.warpPerspective(img,M,(cols*2,rows*2))
# disable x & y axis
plt.axis('off')
# show the resulting image
plt.imshow(scaled_img)
plt.show()
# save the resulting image to disk
plt.imsave("city_scaled.jpg", scaled_img)
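# Editor's sketch (not part of the original tutorial): the same scale factors
# can also be applied with cv2.resize, the usual API when no rotation or shear
# is involved; it produces an exactly scaled image without the padded canvas.
resized_img = cv2.resize(img, None, fx=1.5, fy=1.8, interpolation=cv2.INTER_LINEAR)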
| 26.033333
| 58
| 0.663252
|
35250cbff80fd33585a50103fdfb62f992281fd0
| 26,269
|
py
|
Python
|
salt/fileclient.py
|
d--j/salt
|
579f900be67a80e1a77674bc6aa21fec836c1c4c
|
[
"Apache-2.0"
] | null | null | null |
salt/fileclient.py
|
d--j/salt
|
579f900be67a80e1a77674bc6aa21fec836c1c4c
|
[
"Apache-2.0"
] | null | null | null |
salt/fileclient.py
|
d--j/salt
|
579f900be67a80e1a77674bc6aa21fec836c1c4c
|
[
"Apache-2.0"
] | null | null | null |
'''
Classes that manage file clients
'''
# Import python libs
import contextlib
import logging
import hashlib
import os
import shutil
import string
import subprocess
# Import third party libs
import yaml
# Import salt libs
from salt.exceptions import MinionError, SaltReqTimeoutError
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.templates
import salt.utils.gzip_util
from salt._compat import (
URLError, HTTPError, BaseHTTPServer, urlparse, url_open)
log = logging.getLogger(__name__)
def get_file_client(opts):
'''
Read in the ``file_client`` option and return the correct type of file
server
'''
return {
'remote': RemoteClient,
'local': LocalClient
}.get(opts['file_client'], RemoteClient)(opts)
class Client(object):
'''
Base class for Salt file interactions
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
def _check_proto(self, path):
'''
Make sure that this path is intended for the salt master and trim it
'''
if not path.startswith('salt://'):
raise MinionError('Unsupported path: {0}'.format(path))
return path[7:]
def _file_local_list(self, dest):
'''
Helper util to return a list of files in a directory
'''
if os.path.isdir(dest):
destdir = dest
else:
destdir = os.path.dirname(dest)
filelist = set()
for root, dirs, files in os.walk(destdir, followlinks=True):
for name in files:
path = os.path.join(root, name)
filelist.add(path)
return filelist
@contextlib.contextmanager
def _cache_loc(self, path, env='base'):
'''
Return the local location to cache the file, cache dirs will be made
'''
dest = os.path.join(self.opts['cachedir'],
'files',
env,
path)
destdir = os.path.dirname(dest)
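        # 63 == 0o077: mask off group/other permission bits while the cache
        # directories and file are created; the previous umask is restored below.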
cumask = os.umask(63)
if not os.path.isdir(destdir):
# remove destdir if it is a regular file to avoid an OSError when
# running os.makedirs below
if os.path.isfile(destdir):
os.remove(destdir)
os.makedirs(destdir)
yield dest
os.umask(cumask)
def get_file(self, path, dest='', makedirs=False, env='base', gzip=None):
'''
Copies a file from the local files or master depending on
implementation
'''
raise NotImplementedError
def file_list_emptydirs(self, env='base'):
'''
List the empty dirs
'''
raise NotImplementedError
def cache_file(self, path, env='base'):
'''
Pull a file down from the file server and store it in the minion
file cache
'''
return self.get_url(path, '', True, env)
def cache_files(self, paths, env='base'):
'''
Download a list of files stored on the master and put them in the
minion file cache
'''
ret = []
if isinstance(paths, str):
paths = paths.split(',')
for path in paths:
ret.append(self.cache_file(path, env))
return ret
def cache_master(self, env='base'):
'''
Download and cache all files on a master in a specified environment
'''
ret = []
for path in self.file_list(env):
ret.append(self.cache_file('salt://{0}'.format(path), env))
return ret
def cache_dir(self, path, env='base', include_empty=False):
'''
Download all of the files in a subdir of the master
'''
ret = []
path = self._check_proto(path)
# We want to make sure files start with this *directory*, use
# '/' explicitly because the master (that's generating the
# list of files) only runs on POSIX
if not path.endswith('/'):
path = path + '/'
log.info(
'Caching directory \'{0}\' for environment \'{1}\''.format(
path, env
)
)
#go through the list of all files finding ones that are in
#the target directory and caching them
ret.extend([self.cache_file('salt://' + fn_, env)
for fn_ in self.file_list(env)
if fn_.strip() and fn_.startswith(path)])
if include_empty:
# Break up the path into a list containing the bottom-level
# directory (the one being recursively copied) and the directories
# preceding it
#separated = string.rsplit(path, '/', 1)
#if len(separated) != 2:
# # No slashes in path. (So all files in env will be copied)
# prefix = ''
#else:
# prefix = separated[0]
dest = salt.utils.path_join(
self.opts['cachedir'],
'files',
env
)
for fn_ in self.file_list_emptydirs(env):
if fn_.startswith(path):
minion_dir = '{0}/{1}'.format(dest, fn_)
if not os.path.isdir(minion_dir):
os.makedirs(minion_dir)
ret.append(minion_dir)
return ret
def cache_local_file(self, path, **kwargs):
'''
Cache a local file on the minion in the localfiles cache
'''
dest = os.path.join(self.opts['cachedir'], 'localfiles',
path.lstrip('/'))
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
shutil.copyfile(path, dest)
return dest
def file_local_list(self, env='base'):
'''
List files in the local minion files and localfiles caches
'''
filesdest = os.path.join(self.opts['cachedir'], 'files', env)
localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles')
fdest = self._file_local_list(filesdest)
ldest = self._file_local_list(localfilesdest)
return sorted(fdest.union(ldest))
def file_list(self, env='base'):
'''
This function must be overwritten
'''
return []
def dir_list(self, env='base'):
'''
This function must be overwritten
'''
return []
def is_cached(self, path, env='base'):
'''
Returns the full path to a file if it is cached locally on the minion
otherwise returns a blank string
'''
localsfilesdest = os.path.join(
self.opts['cachedir'], 'localfiles', path.lstrip('/'))
        # Note: strip the 'salt://' prefix explicitly; path.lstrip('salt://')
        # would also eat leading 's', 'a', 'l', 't', ':' or '/' characters of
        # the file name itself.
        filesdest = os.path.join(
            self.opts['cachedir'], 'files', env,
            path.split('salt://', 1)[-1].lstrip('/'))
if os.path.exists(filesdest):
return filesdest
elif os.path.exists(localsfilesdest):
return localsfilesdest
return ''
def list_states(self, env):
'''
Return a list of all available sls modules on the master for a given
environment
'''
states = []
for path in self.file_list(env):
if path.endswith('.sls'):
# is an sls module!
if path.endswith('{0}init.sls'.format('/')):
states.append(path.replace('/', '.')[:-9])
else:
states.append(path.replace('/', '.')[:-4])
return states
def get_state(self, sls, env):
'''
Get a state file from the master and store it in the local minion
cache return the location of the file
'''
if '.' in sls:
sls = sls.replace('.', '/')
for path in ['salt://{0}.sls'.format(sls),
'/'.join(['salt:/', sls, 'init.sls'])]:
dest = self.cache_file(path, env)
if dest:
return {'source': path, 'dest': dest}
return {}
def get_dir(self, path, dest='', env='base', gzip=None):
'''
Get a directory recursively from the salt-master
'''
# TODO: We need to get rid of using the string lib in here
ret = []
# Strip trailing slash
path = string.rstrip(self._check_proto(path), '/')
# Break up the path into a list containing the bottom-level directory
# (the one being recursively copied) and the directories preceding it
separated = string.rsplit(path, '/', 1)
if len(separated) != 2:
# No slashes in path. (This means all files in env will be copied)
prefix = ''
else:
prefix = separated[0]
# Copy files from master
for fn_ in self.file_list(env):
if fn_.startswith(path):
# Prevent files in "salt://foobar/" (or salt://foo.sh) from
# matching a path of "salt://foo"
try:
if fn_[len(path)] != '/':
continue
except IndexError:
continue
# Remove the leading directories from path to derive
# the relative path on the minion.
minion_relpath = string.lstrip(fn_[len(prefix):], '/')
ret.append(
self.get_file(
'salt://{0}'.format(fn_),
'{0}/{1}'.format(dest, minion_relpath),
True, env, gzip
)
)
# Replicate empty dirs from master
for fn_ in self.file_list_emptydirs(env):
if fn_.startswith(path):
# Prevent an empty dir "salt://foobar/" from matching a path of
# "salt://foo"
try:
if fn_[len(path)] != '/':
continue
except IndexError:
continue
# Remove the leading directories from path to derive
# the relative path on the minion.
minion_relpath = string.lstrip(fn_[len(prefix):], '/')
minion_mkdir = '{0}/{1}'.format(dest, minion_relpath)
if not os.path.isdir(minion_mkdir):
os.makedirs(minion_mkdir)
ret.append(minion_mkdir)
ret.sort()
return ret
def get_url(self, url, dest, makedirs=False, env='base'):
'''
Get a single file from a URL.
'''
url_data = urlparse(url)
if url_data.scheme == 'salt':
return self.get_file(url, dest, makedirs, env)
if dest:
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
return ''
else:
dest = salt.utils.path_join(
self.opts['cachedir'],
'extrn_files',
env,
url_data.netloc,
url_data.path
)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
try:
with contextlib.closing(url_open(url)) as srcfp:
with salt.utils.fopen(dest, 'wb') as destfp:
shutil.copyfileobj(srcfp, destfp)
return dest
except HTTPError as ex:
raise MinionError('HTTP error {0} reading {1}: {3}'.format(
ex.code,
url,
*BaseHTTPServer.BaseHTTPRequestHandler.responses[ex.code]))
except URLError as ex:
raise MinionError('Error reading {0}: {1}'.format(url, ex.reason))
def get_template(
self,
url,
dest,
template='jinja',
makedirs=False,
env='base',
**kwargs):
'''
Cache a file then process it as a template
'''
kwargs['env'] = env
url_data = urlparse(url)
sfn = self.cache_file(url, env)
if not os.path.exists(sfn):
return ''
if template in salt.utils.templates.TEMPLATE_REGISTRY:
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
**kwargs
)
else:
log.error('Attempted to render template with unavailable engine '
'{0}'.format(template))
return ''
if not data['result']:
# Failed to render the template
log.error(
'Failed to render template with error: {0}'.format(
data['data']
)
)
return ''
if not dest:
# No destination passed, set the dest as an extrn_files cache
dest = salt.utils.path_join(
self.opts['cachedir'],
'extrn_files',
env,
url_data.netloc,
url_data.path
)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
salt.utils.safe_rm(data['data'])
return ''
shutil.move(data['data'], dest)
return dest
class LocalClient(Client):
'''
Use the local_roots option to parse a local file root
'''
def __init__(self, opts):
Client.__init__(self, opts)
def _find_file(self, path, env='base'):
'''
Locate the file path
'''
fnd = {'path': '',
'rel': ''}
if env not in self.opts['file_roots']:
return fnd
if path.startswith('|'):
# The path arguments are escaped
path = path[1:]
for root in self.opts['file_roots'][env]:
full = os.path.join(root, path)
if os.path.isfile(full):
fnd['path'] = full
fnd['rel'] = path
return fnd
return fnd
def get_file(self, path, dest='', makedirs=False, env='base', gzip=None):
'''
Copies a file from the local files directory into :param:`dest`
gzip compression settings are ignored for local files
'''
path = self._check_proto(path)
fnd = self._find_file(path, env)
if not fnd['path']:
return ''
return fnd['path']
def file_list(self, env='base'):
'''
Return a list of files in the given environment
'''
ret = []
if env not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][env]:
for root, dirs, files in os.walk(path, followlinks=True):
for fname in files:
ret.append(
os.path.relpath(
os.path.join(root, fname),
path
)
)
return ret
def file_list_emptydirs(self, env='base'):
'''
List the empty dirs in the file_roots
'''
ret = []
if env not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][env]:
for root, dirs, files in os.walk(path, followlinks=True):
if len(dirs) == 0 and len(files) == 0:
ret.append(os.path.relpath(root, path))
return ret
def dir_list(self, env='base'):
'''
List the dirs in the file_roots
'''
ret = []
if env not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][env]:
for root, dirs, files in os.walk(path, followlinks=True):
ret.append(os.path.relpath(root, path))
return ret
def hash_file(self, path, env='base'):
'''
        Return the hash of a file. To hash a file in the file_roots, prefix
        the path with salt://<file on server>; otherwise, prefix a local file
        with /.
'''
ret = {}
try:
path = self._check_proto(path)
except MinionError:
if not os.path.isfile(path):
err = 'Specified file {0} is not present to generate hash'
log.warning(err.format(path))
return ret
else:
with salt.utils.fopen(path, 'rb') as ifile:
ret['hsum'] = hashlib.md5(ifile.read()).hexdigest()
ret['hash_type'] = 'md5'
return ret
path = self._find_file(path, env)['path']
if not path:
return {}
ret = {}
with salt.utils.fopen(path, 'rb') as ifile:
ret['hsum'] = getattr(hashlib, self.opts['hash_type'])(
ifile.read()).hexdigest()
ret['hash_type'] = self.opts['hash_type']
return ret
def list_env(self, env='base'):
'''
Return a list of the files in the file server's specified environment
'''
return self.file_list(env)
def master_opts(self):
'''
Return the master opts data
'''
return self.opts
def ext_nodes(self):
'''
Return the metadata derived from the external nodes system on the local
system
'''
if not self.opts['external_nodes']:
return {}
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], self.opts['id'])
ndata = yaml.safe_load(subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
ret = {}
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
return ret
class RemoteClient(Client):
'''
Interact with the salt master file server.
'''
def __init__(self, opts):
Client.__init__(self, opts)
self.auth = salt.crypt.SAuth(opts)
self.sreq = salt.payload.SREQ(self.opts['master_uri'])
def get_file(self, path, dest='', makedirs=False, env='base', gzip=None):
'''
Get a single file from the salt-master
        path must be a salt server location, i.e. salt://path/to/file. If
        dest is omitted, the downloaded file is placed in the minion file
        cache.
'''
log.info('Fetching file \'{0}\''.format(path))
d_tries = 0
path = self._check_proto(path)
load = {'path': path,
'env': env,
'cmd': '_serve_file'}
if gzip:
gzip = int(gzip)
load['gzip'] = gzip
fn_ = None
if dest:
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
return False
fn_ = salt.utils.fopen(dest, 'wb+')
while True:
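            # The master serves the file in chunks: 'loc' tells it where to
            # resume, so each pass through the loop appends the next block
            # until an empty payload signals the end of the file.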
if not fn_:
load['loc'] = 0
else:
load['loc'] = fn_.tell()
try:
data = self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
if not data['data']:
if not fn_ and data['dest']:
# This is a 0 byte file on the master
with self._cache_loc(data['dest'], env) as cache_dest:
dest = cache_dest
with salt.utils.fopen(cache_dest, 'wb+') as ofile:
ofile.write(data['data'])
if 'hsum' in data and d_tries < 3:
# Master has prompted a file verification, if the
# verification fails, redownload the file. Try 3 times
d_tries += 1
with salt.utils.fopen(dest, 'rb') as fp_:
hsum = getattr(
hashlib,
data.get('hash_type', 'md5')
)(fp_.read()).hexdigest()
if hsum != data['hsum']:
log.warn('Bad download of file {0}, attempt {1} '
'of 3'.format(path, d_tries))
continue
break
if not fn_:
with self._cache_loc(data['dest'], env) as cache_dest:
dest = cache_dest
# If a directory was formerly cached at this path, then
# remove it to avoid a traceback trying to write the file
if os.path.isdir(dest):
salt.utils.rm_rf(dest)
fn_ = salt.utils.fopen(dest, 'wb+')
if data.get('gzip', None):
data = salt.utils.gzip_util.uncompress(data['data'])
else:
data = data['data']
fn_.write(data)
if fn_:
fn_.close()
return dest
def file_list(self, env='base'):
'''
List the files on the master
'''
load = {'env': env,
'cmd': '_file_list'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def file_list_emptydirs(self, env='base'):
'''
List the empty dirs on the master
'''
load = {'env': env,
'cmd': '_file_list_emptydirs'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def dir_list(self, env='base'):
'''
List the dirs on the master
'''
load = {'env': env,
'cmd': '_dir_list'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def hash_file(self, path, env='base'):
'''
        Return the hash of a file. To hash a file on the salt master file
        server, prefix the path with salt://<file on server>; otherwise,
        prefix a local file with /.
'''
try:
path = self._check_proto(path)
except MinionError:
if not os.path.isfile(path):
err = 'Specified file {0} is not present to generate hash'
log.warning(err.format(path))
return {}
else:
ret = {}
with salt.utils.fopen(path, 'rb') as ifile:
ret['hsum'] = hashlib.md5(ifile.read()).hexdigest()
ret['hash_type'] = 'md5'
return ret
load = {'path': path,
'env': env,
'cmd': '_file_hash'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def list_env(self, env='base'):
'''
Return a list of the files in the file server's specified environment
'''
load = {'env': env,
'cmd': '_file_list'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def master_opts(self):
'''
Return the master opts data
'''
load = {'cmd': '_master_opts'}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
def ext_nodes(self):
'''
Return the metadata derived from the external nodes system on the
master.
'''
load = {'cmd': '_ext_nodes',
'id': self.opts['id'],
'opts': self.opts}
try:
return self.auth.crypticle.loads(
self.sreq.send('aes',
self.auth.crypticle.dumps(load),
3,
60)
)
except SaltReqTimeoutError:
return ''
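# Editor's illustrative sketch (not part of the original module): how the
# get_file_client() factory above is intended to be used.  The opts keys shown
# are assumptions for a minimal local setup, not a documented configuration,
# and the function is defined only as an example -- it is never called here.
def _example_local_usage():
    opts = {'file_client': 'local',                 # selects LocalClient
            'file_roots': {'base': ['/srv/salt']},  # hypothetical file root
            'cachedir': '/var/cache/salt/minion'}   # hypothetical cache dir
    client = get_file_client(opts)
    # List every file available in the 'base' environment
    return client.file_list('base')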
| 33.42112
| 79
| 0.487609
|
2c6d0ead433a33ba5f421dfec165b692a6b664da
| 316
|
py
|
Python
|
Core/urls.py
|
FranckyCastell/Clip_365
|
72c49da854ed974ea47c51720eca2e82cf6f73e1
|
[
"Apache-2.0"
] | null | null | null |
Core/urls.py
|
FranckyCastell/Clip_365
|
72c49da854ed974ea47c51720eca2e82cf6f73e1
|
[
"Apache-2.0"
] | null | null | null |
Core/urls.py
|
FranckyCastell/Clip_365
|
72c49da854ed974ea47c51720eca2e82cf6f73e1
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Home.urls')),
]
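# Note: static() only returns URL patterns when settings.DEBUG is True, so the
# line below has no effect in production; the web server must serve MEDIA_ROOT.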
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 28.727273
| 76
| 0.759494
|
751ec55d0ec3fb772407fb26cb40477217d57065
| 22,287
|
py
|
Python
|
train/train.py
|
paranoidai/Fairface-Recognition-Solution
|
7f12bc4462cc765fe8d7a7fa820c63bfe2cc9121
|
[
"MIT"
] | 7
|
2020-07-20T10:16:13.000Z
|
2021-07-29T21:00:55.000Z
|
train/train.py
|
paranoidai/Fairface-Recognition-Solution
|
7f12bc4462cc765fe8d7a7fa820c63bfe2cc9121
|
[
"MIT"
] | null | null | null |
train/train.py
|
paranoidai/Fairface-Recognition-Solution
|
7f12bc4462cc765fe8d7a7fa820c63bfe2cc9121
|
[
"MIT"
] | 3
|
2020-08-03T03:11:04.000Z
|
2021-05-15T15:56:58.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import logging
import sklearn
import pickle
import numpy as np
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
from config import config, default, generate_config
from metric import *
from common import flops_counter
from eval import verification
from symbol import fresnet
from symbol import resnest
import time
from pair_wise_loss import embedding_2_pairwise_loss
from class_level_loss import embedding_2_class_level_loss
logger = logging.getLogger()
logger.setLevel(logging.INFO)
args = None
fixed_param_names = []
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# general
parser.add_argument('--dataset', default=default.dataset, help='dataset config')
parser.add_argument('--network', default=default.network, help='network config')
parser.add_argument('--loss', default=default.loss, help='loss config')
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset, args.loss)
# custom
parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
parser.add_argument('--pretrained-epoch', type=int, default=default.pretrained_epoch, help='pretrained epoch to load')
parser.add_argument('--ckpt', type=int, default=default.ckpt, help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
parser.add_argument('--verbose', type=int, default=default.verbose, help='do verification testing and model saving every verbose batches')
parser.add_argument('--num-workers', type=int, default=default.num_workers, help='number of workers for data loading')
parser.add_argument('--cos-lr', action='store_true', help='whether to use cosine lr schedule.')
parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
parser.add_argument('--end-epoch', type=int, default=default.end_epoch, help='number of training epochs (default: 120)')
parser.add_argument('--frequent', type=int, default=default.frequent, help='Number of batches to wait before logging.')
parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
parser.add_argument('--opt', type=str, default=default.opt, help='optmizer name')
parser.add_argument('--no-wd', action='store_true', help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')
parser.add_argument('--selected-attributes', type=int,default=None)
parser.add_argument('--last-gamma', action='store_true',
help='whether to init gamma of the last BN layer in each bottleneck to 0.')
parser.add_argument('--freeze-block', type = int, default = 0,
help='whether to freeze the pre-layer for finetune')
parser.add_argument('--label-smoothing', action='store_true',
help='use label smoothing or not in training. default is false.')
parser.add_argument('--model-visual', action='store_true',
help='visualize Neural Networks as computation graph.')
args = parser.parse_args()
return args
def get_symbol(args):
if(config.net_output == 'ECCV'):
embedding,attr_softmax,body = eval(config.net_name).get_symbol(fixed_param_names=fixed_param_names)
all_label = mx.symbol.Variable('softmax_label')
gt_label = all_label
#class_label = mx.symbol.Variable('face_attr_label')
#gt_label = class_label
gt_label = mx.symbol.slice_axis(all_label, axis=1, begin=0, end=1)
class_label = mx.symbol.slice_axis(all_label, axis=1, begin=1, end=2)
        gt_label = mx.symbol.Reshape(data = gt_label, shape = (-1,))
        class_label = mx.symbol.Reshape(data = class_label, shape = (-1,))
#attr_softmax = mx.sym.FullyConnected(data=attr_softmax, num_hidden=4)
#if(config.fp_16):
# attr_softmax = mx.sym.Cast(data=attr_softmax, dtype=np.float32)
#softmax_class = mx.symbol.SoftmaxOutput(data=attr_softmax, label = class_label, name='softmax', normalization='valid', grad_scale=128.0)
attr_softmax_loss = mx.symbol.log(attr_softmax+1e-5)
_label = mx.sym.one_hot(class_label, depth = 4, on_value = -1.0, off_value = 0.0)
attr_softmax_loss = attr_softmax_loss*_label
attr_softmax_loss = mx.symbol.sum(attr_softmax_loss)/args.per_batch_size
if(config.fp_16):
attr_softmax_loss = mx.symbol.MakeLoss(attr_softmax_loss, grad_scale=config.scale16)
else:
attr_softmax_loss = mx.symbol.MakeLoss(attr_softmax_loss)
else:
embedding = eval(config.net_name).get_symbol(fixed_param_names=fixed_param_names)
all_label = mx.symbol.Variable('softmax_label')
gt_label = all_label
#gt_label = class_label
out_list = []
out_list.append(mx.symbol.BlockGrad(embedding))
if config.loss_name.find('fusion')>=0:
triplet_loss_type = config.loss_name.split('_fusion_')[0]
class_level_loss_type = config.loss_name.split('_fusion_')[1]
print(triplet_loss_type, class_level_loss_type)
triplet_loss = embedding_2_pairwise_loss(embedding, triplet_loss_type, gt_label, args.per_batch_size // 4 * 3)
class_level_loss, orgLogits, ce_loss = embedding_2_class_level_loss(embedding, class_level_loss_type, gt_label, args.per_batch_size)
out_list.append(mx.sym.BlockGrad(gt_label))
out_list.append(triplet_loss)
out_list.append(class_level_loss)
final_loss = triplet_loss + ce_loss
#out_list.append(mx.sym.BlockGrad(sp))
#out_list.append(mx.sym.BlockGrad(sn))
out_list.append(mx.sym.BlockGrad(final_loss))
elif config.loss_name.find('triplet') >=0:
triplet_batch_size = args.per_batch_size
triplet_loss_type = config.loss_name
print('triplet_loss_type ', triplet_loss_type)
triplet_loss = embedding_2_pairwise_loss(embedding, triplet_loss_type, gt_label, triplet_batch_size)
out_list.append(mx.sym.BlockGrad(gt_label))
#out_list.append(mx.sym.BlockGrad(sp))
#out_list.append(mx.sym.BlockGrad(sn))
out_list.append(triplet_loss)
elif config.loss_name == 'final_softmax':
anchor_index = mx.symbol.argmax(gt_label)
nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
#anchor = mx.symbol.slice_axis(nembedding, axis=0, begin=0, end=anchor_index)
#ap = mx.sym.broadcast_mul(gt_one_hot, diff)
gt_label = mx.sym.Reshape(data = gt_label, shape = (77, 1))
ap_emb = mx.sym.broadcast_mul(nembedding, gt_label)
ap_emb = mx.symbol.sum(ap_emb, axis=0, keepdims=0)
data_shape = {'data':(300,3,112,112)}
ap_emb = mx.sym.Reshape(data = ap_emb, shape = (1, 512))
ap = mx.sym.broadcast_mul(nembedding, ap_emb)
ap = mx.symbol.sum(ap, axis=1, keepdims=1)
loss = 1 - ap
#arg_shape, out_shape, _ = ap_emb.infer_shape(**data_shape)
#print(out_shape)
#exit()
#final_loss = ap + gt_label
#final_loss = mx.sym.broadcast_add(ap, gt_label)
#final_loss = mx.sym.sum(final_loss, axis=1, keepdims=1)
triplet_loss = mx.symbol.MakeLoss(loss)
out_list.append(mx.sym.BlockGrad(gt_label))
out_list.append(triplet_loss)
elif config.loss_name.find('softmax')>=0:
class_level_loss_type = config.loss_name
class_level_loss, orgLogits, ce_loss = embedding_2_class_level_loss(embedding, class_level_loss_type, gt_label, args.per_batch_size)
out_list.append(mx.symbol.BlockGrad(mx.symbol.SoftmaxActivation(data=orgLogits)))
if(config.net_output == 'ECCV'):
out_list.append(attr_softmax_loss)
out_list.append(mx.sym.BlockGrad(class_level_loss))
#out_list.append(mx.symbol.BlockGrad(orgLogits))
if(config.net_output == 'ECCV'):
out_list.append(mx.symbol.BlockGrad(attr_softmax))
out_list.append(class_label)
out_list.append(mx.sym.BlockGrad(ce_loss))
out = mx.symbol.Group(out_list)
return out
def train_net(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd)>0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx)==0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
curTime = time.strftime("%Y%m%d%H%M%S", time.localtime())
prefix = os.path.join(args.models_root, '%s-%s-%s-%s'%(curTime, args.network, args.loss, args.dataset), 'model')
prefix_dir = os.path.dirname(prefix)
print('prefix', prefix)
if not os.path.exists(prefix_dir):
os.makedirs(prefix_dir)
args.ctx_num = len(ctx)
args.batch_size = args.per_batch_size*args.ctx_num
args.image_channel = config.image_shape[2]
config.batch_size = args.batch_size
config.per_batch_size = args.per_batch_size
config.no_wd = args.no_wd
config.last_gamma = args.last_gamma
if(args.freeze_block == 1):
config.bn_mom = 1.0
        print('freeze_block set; bn_mom fixed at', config.bn_mom)
data_dir = config.dataset_path
path_imgrec = None
path_imglist = None
image_size = config.image_shape[0:2]
assert len(image_size)==2
#assert image_size[0]==image_size[1]
print('image_size', image_size)
print('num_classes', config.num_classes)
path_imgrec = os.path.join(data_dir, "train.rec")
print('Called with argument:', args, config)
data_shape = (args.image_channel,image_size[0],image_size[1])
mean = None
begin_epoch = 0
if len(args.pretrained)==0:
arg_params = None
aux_params = None
sym = get_symbol(args)
else:
print('loading', args.pretrained, args.pretrained_epoch)
_, arg_params, aux_params = mx.model.load_checkpoint(args.pretrained, args.pretrained_epoch)
#for item in arg_params:
# print(item)
#print(arg_params)
#exit()
sym = get_symbol(args)
if args.model_visual:
mx.viz.plot_network(sym,title='model',save_format='pdf',shape={'data':(64,3,224,224), 'label':(64,)}).view()
exit(0)
if config.count_flops:
all_layers = sym.get_internals()
pre_fix = ''
if(config.emb_size == 2048):
pre_fix = '2048_'
_sym = all_layers[pre_fix + 'fc1_output']
FLOPs = flops_counter.count_flops(_sym, data=(1,3,image_size[0],image_size[1]))
_str = flops_counter.flops_str(FLOPs)
print('Network FLOPs: %s'%_str)
#label_name = 'softmax_label'
#label_shape = (args.batch_size,)
emb_symbol = sym.get_internals()[pre_fix + 'fc1_output']
fixed_param_names = []
if(args.freeze_block == 1):
fixed_param_names = emb_symbol.list_arguments()
elif(args.freeze_block == 2):
emb_symbol = sym.get_internals()[pre_fix + 'bn1_output']
fixed_param_names = emb_symbol.list_arguments()
print(fixed_param_names)
#fixed_aux = emb_symbol.list_auxiliary_states()
#fixed_param_names.extend(fixed_aux)
#print('ffffffffffffffixed params : ', fixed_param_names)
model = mx.mod.Module(
context = ctx,
symbol = sym,
fixed_param_names = fixed_param_names
)
val_dataiter = None
if config.loss_name.find('fusion')>=0:
from pair_fusion_class_image_iter import FaceImageIter
triplet_params = [config.triplet_bag_size, config.triplet_alpha, config.triplet_max_ap]
train_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = config.data_rand_mirror,
mean = mean,
cutoff = config.data_cutoff,
ctx_num = args.ctx_num,
images_per_identity = config.images_per_identity,
triplet_params = triplet_params,
mx_model = model,
fairface_mode = config.fairface_mode,
)
_metric = LossValueMetric()
eval_metrics = [mx.metric.create(_metric)]
elif config.loss_name.find('triplet')>=0:
#from fair_face_triplet_iter import FaceImageIter
from triplet_image_iter import FaceImageIter
if(config.loss_name == 'triplet'):
dis_type = 'e'
elif(config.loss_name == 'atriplet'):
dis_type = 'c'
triplet_params = [config.triplet_bag_size, config.triplet_alpha, config.triplet_max_ap]
train_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = config.data_rand_mirror,
mean = mean,
cutoff = config.data_cutoff,
ctx_num = args.ctx_num,
images_per_identity = config.images_per_identity,
triplet_params = triplet_params,
mx_model = model,
fairface_mode = config.fairface_mode,
dis_type = dis_type,
)
_metric = LossValueMetric()
eval_metrics = [mx.metric.create(_metric)]
elif config.loss_name.find('softmax')>=0:
from image_iter_gluon import FaceImageDataset
train_dataset = FaceImageDataset(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = config.data_rand_mirror,
mean = mean,
cutoff = config.data_cutoff,
color_jittering = config.data_color,
images_filter = config.data_images_filter,
selected_attributes = args.selected_attributes,
label_name = ['softmax_label']
)
train_data = mx.gluon.data.DataLoader(train_dataset, args.batch_size, shuffle=True, last_batch="rollover", num_workers=args.num_workers)
train_dataiter = mx.contrib.io.DataLoaderIter(train_data)
metric1 = AccMetric()
eval_metrics = [mx.metric.create(metric1)]
if config.ce_loss:
metric2 = LossValueMetric()
eval_metrics.append( mx.metric.create(metric2) )
else:
from image_iter import FaceImageIter
train_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = config.data_rand_mirror,
mean = mean,
cutoff = config.data_cutoff,
color_jittering = config.data_color,
images_filter = config.data_images_filter,
)
metric1 = AccMetric()
eval_metrics = [mx.metric.create(metric1)]
if config.loss_name == 'final_softmax':
_metric = LossValueMetric()
eval_metrics = [mx.metric.create(_metric)]
if config.ce_loss:
metric2 = LossValueMetric()
eval_metrics.append( mx.metric.create(metric2) )
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style
#initializer = mx.init.Xavier(rnd_type='uniform', factor_type="in", magnitude=2)
_rescale = 1.0 / args.ctx_num
clip_gradient = None
if config.fp_16:
_rescale /= config.scale16
clip_gradient = config.gradThres
#opt = optimizer.SGD(learning_rate=args.lr, momentum=args.mom, wd=args.wd, rescale_grad=_rescale)#, multi_precision=config.fp_16)
opt = optimizer.create(args.opt, learning_rate=args.lr, momentum=config.mom, wd=config.wd, rescale_grad=_rescale, multi_precision=config.fp_16, clip_gradient=clip_gradient)
_cb = mx.callback.Speedometer(args.batch_size, args.frequent)
# cos learning rate scheduler
if args.cos_lr:
num_batches = config.num_training_samples // args.batch_size
total_batches = default.end_epoch * num_batches
ver_list = []
ver_name_list = []
for name in config.val_targets:
path = os.path.join(data_dir,name+".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
ver_list.append(data_set)
ver_name_list.append(name)
print('ver', name)
def ver_test(nbatch):
results = []
label_shape = None
if(config.net_output == 'ECCV'):
label_shape = (args.batch_size, 2)
for i in range(len(ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(ver_list[i], model, args.batch_size, 10, None, label_shape)
print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm))
#print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc1, std1))
print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc2, std2))
results.append(acc2)
return results
highest_acc = [0.0, 0.0] #lfw and target
# highest_acc.append(0.0)
global_step = [0]
save_step = [0]
highestStep = [0]
lr_steps = [int(x) for x in args.lr_steps.split(',')]
print('lr_steps', lr_steps)
def _batch_callback(param):
#global global_step
global_step[0]+=1
mbatch = global_step[0]
if config.useWarmup and (mbatch < config.warmupSteps):
#opt.lr = args.lr * mbatch / config.warmupSteps
opt.lr = 1.0e-8
#print("warmup lr: ", opt.lr)
if (not config.useWarmup) or (config.useWarmup and (mbatch >= config.warmupSteps)):
targetSteps = mbatch
if config.useWarmup:
if mbatch==config.warmupSteps:
opt.lr = args.lr
targetSteps -= config.warmupSteps
if args.cos_lr:
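                # Cosine schedule: lr decays smoothly from args.lr at step 0
                # towards 0 at total_batches; at the halfway point it equals
                # 0.5 * args.lr * (1 + cos(pi/2)) == 0.5 * args.lr.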
opt.lr = 0.5 * args.lr * (1 + np.cos(np.pi * (targetSteps / total_batches)))
if (targetSteps % 500) == 0:
print('cos lr change to', opt.lr)
else:
for step in lr_steps:
if targetSteps==step:
opt.lr *= 0.1
print('lr change to', opt.lr)
break
_cb(param)
if mbatch%1000==0:
print('lr-batch-epoch:',opt.lr,param.nbatch,param.epoch)
if mbatch>=0 and mbatch%args.verbose==0:
acc_list = ver_test(mbatch)
save_step[0]+=1
msave = save_step[0]
do_save = False
is_highest = False
if len(acc_list)>0:
score = sum(acc_list)
if acc_list[-1]>=highest_acc[-1]:
if acc_list[-1]>highest_acc[-1]:
is_highest = True
else:
if score>=highest_acc[0]:
is_highest = True
highest_acc[0] = score
highest_acc[-1] = acc_list[-1]
highestStep[0] = save_step[0]
if is_highest:
do_save = True
if args.ckpt==0:
do_save = False
elif args.ckpt==2:
do_save = True
elif args.ckpt==3:
msave = 1
if do_save:
print('saving', msave)
arg, aux = model.get_params()
if config.ckpt_embedding:
all_layers = model.symbol.get_internals()
_sym = all_layers['fc1_output']
_arg = {}
for k in arg:
if not k.startswith('fc7'):
_arg[k] = arg[k]
mx.model.save_checkpoint(prefix, msave, _sym, _arg, aux)
else:
mx.model.save_checkpoint(prefix, msave, model.symbol, arg, aux)
print('[%d]Accuracy-Highest: %1.5f, mbatch: %d'%(mbatch, highest_acc[-1], highestStep[0]))
if config.max_steps>0 and mbatch>config.max_steps:
sys.exit(0)
epoch_cb = None
if config.loss_name.find('triplet') < 0:
train_dataiter = mx.io.PrefetchingIter(train_dataiter) #triplet loss unavailable
######
if(config.net_output == 'ECCV'):
class_metric = AccMetric(acc_name = 'class_acc', label_index = 1, pred_index = 4)
eval_metrics.append(mx.metric.create(class_metric))
model.fit(train_dataiter,
begin_epoch = begin_epoch,
num_epoch = 999999,
eval_data = val_dataiter,
eval_metric = eval_metrics,
kvstore = args.kvstore,
optimizer = opt,
#optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
allow_missing = True,
batch_end_callback = _batch_callback,
epoch_end_callback = epoch_cb )
def main():
global args
args = parse_args()
train_net(args)
if __name__ == '__main__':
main()
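# Editor's note (illustrative, not from the original repository): a typical
# launch would look something like the command below.  The network/loss/dataset
# names are placeholders that must match entries defined in config.py.
#
#   CUDA_VISIBLE_DEVICES=0,1 python train.py --network r100 --loss arcface \
#       --dataset emore --per-batch-size 64 --lr 0.1 --verbose 2000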
| 42.370722
| 176
| 0.618522
|
64596992f3c829e471bbcb00e22cc63dbcd789e7
| 890
|
py
|
Python
|
tests/test_tigge.py
|
eengl/grib2io
|
6a3acf8f399faf1c07183919cf8dbcb34f7bec50
|
[
"MIT"
] | 10
|
2020-11-03T01:13:14.000Z
|
2022-02-04T00:24:23.000Z
|
tests/test_tigge.py
|
eengl/grib2io
|
6a3acf8f399faf1c07183919cf8dbcb34f7bec50
|
[
"MIT"
] | 15
|
2021-02-11T02:33:38.000Z
|
2022-03-21T14:34:07.000Z
|
tests/test_tigge.py
|
eengl/grib2io
|
6a3acf8f399faf1c07183919cf8dbcb34f7bec50
|
[
"MIT"
] | 2
|
2021-10-04T23:41:41.000Z
|
2022-03-10T17:38:28.000Z
|
import numpy as np
from mpl_toolkits.basemap import Basemap
from numpy import ma
import pygrib, sys
import matplotlib.pyplot as plt
for grb in pygrib.open('../sampledata/tigge.grb'):
fld = 0.01*grb['values'] # convert to hPa
lats,lons = grb.latlons()
    sys.stdout.write('%s %s %s %s\n' % \
(grb['centre'], fld.shape, fld.min(), fld.max()))
fig=plt.figure(figsize=(10,5))
fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(projection='cyl',lon_0=180)
x, y = m(lons,lats)
levels = np.arange(475,1101,25)
CS = m.contourf(x,y,fld,levels,cmap=plt.cm.jet)
plt.colorbar(drawedges=True, shrink=0.8) # draw colorbar
m.drawcoastlines()
m.drawparallels(np.arange(-80,81,20),labels=[1,0,0,0])
m.drawmeridians(np.arange(0,360,60),labels=[0,0,0,1])
m.drawmapboundary()
plt.title(grb['name']+': '+grb['centre'].upper(),fontsize=12)
plt.show()
| 35.6
| 65
| 0.64382
|
1b64ae6d8b439c117729c3881d499cbb8596daf8
| 2,281
|
py
|
Python
|
examples/muscle_driven_ocp/static_arm.py
|
Kilperic13/BiorbdOptim
|
00668bd1ad00366f16a576f3855a6d207c08b30c
|
[
"Apache-2.0"
] | null | null | null |
examples/muscle_driven_ocp/static_arm.py
|
Kilperic13/BiorbdOptim
|
00668bd1ad00366f16a576f3855a6d207c08b30c
|
[
"Apache-2.0"
] | null | null | null |
examples/muscle_driven_ocp/static_arm.py
|
Kilperic13/BiorbdOptim
|
00668bd1ad00366f16a576f3855a6d207c08b30c
|
[
"Apache-2.0"
] | null | null | null |
import biorbd
from biorbd_optim import (
OptimalControlProgram,
Objective,
ProblemType,
Bounds,
QAndQDotBounds,
InitialConditions,
ShowResult,
)
def prepare_ocp(biorbd_model_path, final_time, number_shooting_points, show_online_optim=False):
# --- Options --- #
# Model path
biorbd_model = biorbd.Model(biorbd_model_path)
torque_min, torque_max, torque_init = -1, 1, 0
muscle_min, muscle_max, muscle_init = 0, 1, 0.5
# Add objective functions
objective_functions = (
{"type": Objective.Lagrange.MINIMIZE_TORQUE, "weight": 1},
{"type": Objective.Lagrange.MINIMIZE_MUSCLES_CONTROL, "weight": 1},
{"type": Objective.Mayer.ALIGN_MARKERS, "first_marker": 0, "second_marker": 5, "weight": 1,},
)
# Dynamics
problem_type = ProblemType.muscle_activations_and_torque_driven
# Constraints
constraints = ()
# Path constraint
X_bounds = QAndQDotBounds(biorbd_model)
# Set the initial position
X_bounds.first_node_min = (0.07, 1.4, 0, 0)
X_bounds.first_node_max = (0.07, 1.4, 0, 0)
# Initial guess
X_init = InitialConditions([1.57] * biorbd_model.nbQ() + [0] * biorbd_model.nbQdot())
# Define control path constraint
U_bounds = Bounds(
[torque_min] * biorbd_model.nbGeneralizedTorque() + [muscle_min] * biorbd_model.nbMuscleTotal(),
[torque_max] * biorbd_model.nbGeneralizedTorque() + [muscle_max] * biorbd_model.nbMuscleTotal(),
)
U_init = InitialConditions(
[torque_init] * biorbd_model.nbGeneralizedTorque() + [muscle_init] * biorbd_model.nbMuscleTotal()
)
# ------------- #
return OptimalControlProgram(
biorbd_model,
problem_type,
number_shooting_points,
final_time,
objective_functions,
X_init,
U_init,
X_bounds,
U_bounds,
constraints,
show_online_optim=show_online_optim,
)
if __name__ == "__main__":
ocp = prepare_ocp(biorbd_model_path="arm26.bioMod", final_time=2, number_shooting_points=20, show_online_optim=True)
# --- Solve the program --- #
sol = ocp.solve()
# --- Show results --- #
result = ShowResult(ocp, sol)
result.animate(show_meshes=False)
result.graphs()
| 28.5125
| 120
| 0.659798
|
f2346a31fecf1b02f3d77578ee63f676353be7a7
| 4,124
|
py
|
Python
|
simulation/src/simulation_groundtruth/src/label_camera/label_speaker.py
|
KITcar-Team/kitcar-gazebo-simulation
|
8a9438b5a24c288721ae0302889fe55e26046310
|
[
"MIT"
] | 13
|
2020-06-30T17:18:28.000Z
|
2021-07-20T16:55:35.000Z
|
simulation/src/simulation_groundtruth/src/label_camera/label_speaker.py
|
KITcar-Team/kitcar-gazebo-simulation
|
8a9438b5a24c288721ae0302889fe55e26046310
|
[
"MIT"
] | 1
|
2020-11-10T20:15:42.000Z
|
2020-12-25T18:27:56.000Z
|
simulation/src/simulation_groundtruth/src/label_camera/label_speaker.py
|
KITcar-Team/kitcar-gazebo-simulation
|
8a9438b5a24c288721ae0302889fe55e26046310
|
[
"MIT"
] | 3
|
2020-07-20T09:09:08.000Z
|
2021-07-20T17:00:37.000Z
|
import functools
import math
from typing import List, Tuple
import numpy as np
from gazebo_simulation.msg import CarState as CarStateMsg
from simulation_groundtruth.msg import Section as SectionMsg
from simulation.src.simulation_evaluation.src.speaker.speakers.speaker import Speaker
from simulation.utils.geometry import Polygon, Vector
from .bounding_box import BoundingBox
class LabelSpeaker(Speaker):
"""Speaker that allows to retrieve visible groundtruth objects and their position."""
def listen(self, msg: CarStateMsg):
super().listen(msg)
self.camera_fov = Polygon(msg.view_cone)
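        # A new CarState means a new field of view, so the memoized result of
        # _get_visible_sections (lru_cache below) must be invalidated here.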
LabelSpeaker._get_visible_sections.cache_clear()
@functools.lru_cache(1)
def _get_visible_sections(self) -> List[SectionMsg]:
if not hasattr(self, "camera_fov"):
return []
visibles = []
for sec in self.sections:
line_tuple = self.get_road_lines(sec.id)
left_line, right_line = line_tuple.left, line_tuple.right
if right_line.intersects(self.camera_fov) or left_line.intersects(
self.camera_fov
):
visibles.append(sec)
return visibles
def _extract_bounding_boxes(self, func):
lp_gen = (lp for sec in self._get_visible_sections() for lp in func(sec.id))
bbs = []
for lp in lp_gen:
if lp.frame.intersects(self.camera_fov):
points = lp.frame.get_points()
points += [p + Vector(0, 0, lp.height) for p in points]
bbs.append(
BoundingBox(
world_points=points, class_id=lp.id_, class_description=lp.desc
)
)
return bbs
def _get_visible_obstacles(self) -> List[BoundingBox]:
return self._extract_bounding_boxes(self.get_obstacles_in_section)
def _get_visible_surface_markings(self) -> List[BoundingBox]:
return self._extract_bounding_boxes(self.get_surface_markings_in_section)
def _get_visible_signs(self) -> List[BoundingBox]:
return self._extract_bounding_boxes(
self.get_traffic_signs_in_section,
)
def speak(
self, image_size: Tuple[int, int], horizontal_fov: float
) -> List[BoundingBox]:
"""Create and return all bounding boxes of currently visible objects.
Args:
image_size: Total size of the image. Width and height.
horizontal_fov: Field of view of the camera in horizontal direction.
"""
# Get all bounding boxes sorted from nearest to furthest
bbs = sorted(
self._get_visible_obstacles()
+ self._get_visible_surface_markings()
+ self._get_visible_signs()
)
visible_bbs = []
# Use this image to mark pixels that are already used by another bounding box
        # If a bounding box is within another bounding box (and behind it),
# it should not be returned.
boxed_img = np.ones(image_size)
for bb in bbs:
# Filter bounding boxes that are not in the camera's field of view
if abs(bb.angle) > math.pi / 2:
continue
bounds = bb.get_bounds()
bounds = (
max(bounds[0], 0),
max(bounds[1], 0),
min(bounds[2], image_size[0] - 1),
min(bounds[3], image_size[1] - 1),
)
# Filter bounding boxes that are behind other boxes.
# More than half is visible
if (
np.sum(boxed_img[bounds[0] : bounds[2] + 1, bounds[1] : bounds[3] + 1])
> (bounds[2] - bounds[0] + 1) * (bounds[3] - bounds[1] + 1) / 2
):
visible_bbs.append(bb)
boxed_img[bounds[0] : bounds[2] + 1, bounds[1] : bounds[3] + 1] = 0
visible_bbs = [
bb
for bb in visible_bbs
if bb.class_id // 100 != 1
or math.cos(bb.orientation - math.pi) > math.cos(math.radians(80))
]
return visible_bbs
| 34.949153
| 89
| 0.598691
|
fec22cf38c2fe4c7eb28fe82441a7da7c796e2ad
| 5,005
|
py
|
Python
|
ddsc/sdk/azure.py
|
Duke-GCB/DukeDSClient
|
7f119a5ee2e674e8deaff1f080caed1953c5cc61
|
[
"MIT"
] | 4
|
2020-06-18T12:30:13.000Z
|
2020-10-12T21:25:54.000Z
|
ddsc/sdk/azure.py
|
Duke-GCB/DukeDSClient
|
7f119a5ee2e674e8deaff1f080caed1953c5cc61
|
[
"MIT"
] | 239
|
2016-02-18T14:44:08.000Z
|
2022-03-11T14:38:56.000Z
|
ddsc/sdk/azure.py
|
Duke-GCB/DukeDSClient
|
7f119a5ee2e674e8deaff1f080caed1953c5cc61
|
[
"MIT"
] | 10
|
2016-02-22T15:01:28.000Z
|
2022-02-21T22:46:26.000Z
|
from ddsc.exceptions import DDSUserException
class Azure(object):
def __init__(self, config):
# placeholder data until Azure backend is live
self._placeholder_projects = [
AzureProject(self, name="user1/Mouse", auth_role="file_downloader"),
AzureProject(self, name="user1/rna", auth_role="project_admin")
]
self._placeholder_files = {
"user1/Mouse": [
AzureFile(self, "/file1.dat", md5="a1335de16b6efeb0f0dba271521c1f9d"),
AzureFile(self, "/data/SRN01.fastq.gz", md5="b1335de16b6efeb0f0dba271521c1f9d"),
AzureFile(self, "/data/SRN02.fastq.gz", md5="c1335de16b6efeb0f0dba271521c1f9d"),
],
"user1/rna": [
AzureFile(self, "/one/SRN01.fastq.gz", md5="d1335de16b6efeb0f0dba271521c1f9d"),
AzureFile(self, "/two/SRN02.fastq.gz", md5="e1335de16b6efeb0f0dba271521c1f9d"),
AzureFile(self, "/three/SRN03.fastq.gz", md5="f1335de16b6efeb0f0dba271521c1f9d"),
AzureFile(self, "/three/nested/SRN04.fastq.gz", md5="g1335de16b6efeb0f0dba271521c1f9d"),
],
}
self._auth_roles = [
AzureAuthRole(self, "project_admin",
"Can update project details, delete project, manage project level permissions "
"and perform all file operations"),
AzureAuthRole(self, "project_viewer", "Can only view project and file meta-data"),
AzureAuthRole(self, "file_downloader", "Can download files"),
AzureAuthRole(self, "file_editor", "Can view download create update and delete files"),
AzureAuthRole(self, "file_uploader", "Can update files"),
]
def get_projects(self):
return self._placeholder_projects
def get_project(self, project_name):
items = [p for p in self._placeholder_projects if p.name == project_name]
if not items:
raise ItemNotFound("Unable to find project named '{}'.".format(project_name))
return items[0]
def get_files(self, project):
return self._placeholder_files.get(project.name, [])
def get_auth_roles(self):
return self._auth_roles
def upload_files(self, project, paths, follow_symlinks, dry_run):
print("Upload files/folders")
print("project", project)
print("paths", paths)
print("follow_symlinks", follow_symlinks)
print("dry_run", dry_run)
def add_user(self, project, netid, auth_role):
print("Add user")
print("project", project)
print("netid", netid)
print("auth_role", auth_role)
def remove_user(self, project, netid):
print("Remove User")
print("project", project)
print("netid", netid)
def download_files(self, project, include_paths, exclude_paths, destination):
print("Download")
print("project", project)
print("include_paths", include_paths)
print("exclude_paths", exclude_paths)
print("destination", destination)
def share(self, project, netid, auth_role):
print("Share")
print("project", project)
print("netid", netid)
print("auth_role", auth_role)
def deliver(self, project, netid, copy_project, resend, msg_file, share_usernames):
print("Deliver ")
print("project", project)
print("netid", netid)
print("copy_project", copy_project)
print("resend", resend)
print("msg_file", msg_file)
print("share_usernames", share_usernames)
def delete(self, project, remote_path):
print("Delete")
print("project", project)
print("remote_path", remote_path)
def move(self, project, source_remote_path, target_remote_path):
print("Move")
print("project", project)
print("source_remote_path", source_remote_path)
print("target_remote_path", target_remote_path)
class AzureProject(object):
def __init__(self, azure, name, auth_role):
self.azure = azure
self.name = name
self.auth_role = auth_role
def get_url(self):
return " TODO"
def size_str(self):
print()
print("Name:", self.name)
print("URL:", self.get_url())
print("Size:", self.get_size_str())
print()
def get_size_str(self):
return "TODO"
def __str__(self):
return "<AzureProject name={} auth_role={}>".format(self.name, self.auth_role)
class AzureFile(object):
def __init__(self, azure, name, md5):
self.azure = azure
self.name = name
self.md5 = md5
def __str__(self):
return "<AzureFile name={} md5={}>".format(self.name, self.md5)
class AzureAuthRole(object):
def __init__(self, azure, id, description):
self.azure = azure
self.id = id
self.description = description
class ItemNotFound(DDSUserException):
pass
| 35
| 105
| 0.622178
|
e0ec66a3af65bb2d18207915fabb55eb806a3352
| 577
|
py
|
Python
|
explosig_data/__init__.py
|
lrgr/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-01-30T17:55:03.000Z
|
2020-01-30T17:55:03.000Z
|
explosig_data/__init__.py
|
keller-mark/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-02-20T15:03:54.000Z
|
2020-02-20T23:44:14.000Z
|
explosig_data/__init__.py
|
keller-mark/explosig-data
|
9fd11e5252e3fb112dc7a3e55cb7b40d8b8d5efb
|
[
"MIT"
] | 1
|
2020-01-12T14:17:20.000Z
|
2020-01-12T14:17:20.000Z
|
import os
import logging
from .constants import *
from .utils import clean_ssm_df
from .ssm_extended import extend_ssm_df
from .ssm_counts import counts_from_extended_ssm_df
from .ssm_container import SimpleSomaticMutationContainer
from .data_source_ICGC import standardize_ICGC_ssm_file
from .data_source_TCGA import standardize_TCGA_maf_file
def _setup():
os.makedirs(EXPLOSIG_DATA_DIR, exist_ok=True)
os.makedirs(os.path.join(EXPLOSIG_DATA_DIR, 'genes'), exist_ok=True)
os.makedirs(os.path.join(EXPLOSIG_DATA_DIR, 'genomes'), exist_ok=True)
_setup()
| 25.086957
| 74
| 0.811092
|
1fcdaa1ce75e66ddf47b3e2831b96e0fb83e8cab
| 19,249
|
py
|
Python
|
p099.py
|
arpit0891/Project-euler
|
ab36b33c578578595bb518508fa2fe5862f4a044
|
[
"MIT"
] | 1
|
2020-05-14T09:22:32.000Z
|
2020-05-14T09:22:32.000Z
|
p099.py
|
prve17/Project-Euler
|
1ff72404ca9ebe7de2eab83d43960d86bc487515
|
[
"MIT"
] | 1
|
2020-03-13T12:42:28.000Z
|
2020-05-13T13:26:32.000Z
|
p099.py
|
prve17/Project-Euler
|
1ff72404ca9ebe7de2eab83d43960d86bc487515
|
[
"MIT"
] | 3
|
2020-05-13T13:39:46.000Z
|
2020-06-26T10:44:53.000Z
|
def compute():
ans = None
maxval = None
for (i, val) in enumerate(DATA):
if maxval is None or compare_powers(val, maxval) > 0:
ans = i + 1
maxval = val
return str(ans)
def compare_powers(pairx, pairy):
# First try fast low-precision computations, retrying with increasing precision
precision = 16
while precision <= 1024:
# Use interval arithmetic for approximate comparisons
xlow = BigFloat(pairx[0]).power(pairx[1], precision, False)
xhigh = BigFloat(pairx[0]).power(pairx[1], precision, True )
ylow = BigFloat(pairy[0]).power(pairy[1], precision, False)
yhigh = BigFloat(pairy[0]).power(pairy[1], precision, True )
if xhigh.compare_to(ylow) < 0:
return -1
elif xlow.compare_to(yhigh) > 0:
return +1
else:
precision *= 2
# Otherwise do full-precision comparison (slow)
x = pairx[0]**pairx[1]
y = pairy[0]**pairy[1]
if x < y:
return -1
elif x > y:
return +1
else:
return 0
# Represents a strictly positive number equal to mantissa * 2^exponent
class BigFloat(object):
def __init__(self, man, exp=0):
self.mantissa = man
self.exponent = exp
# The output's mantissa will have 'precision' or fewer bits
def multiply(self, other, precision, roundup):
man = self.mantissa * other.mantissa
exp = self.exponent + other.exponent
excess = man.bit_length() - precision
if excess > 0:
if roundup:
mask = (1 << excess) - 1
if mask & man != 0:
man += 1 << excess
excess = man.bit_length() - precision # In case 'man' is bumped up to the next power of 2
man >>= excess
exp += excess
return BigFloat(man, exp)
# Exponentiation by squaring
def power(self, y, precision, roundup):
if y < 0 or precision <= 0:
raise ValueError()
x = self
z = BigFloat(1, 0)
while y != 0:
if y & 1 != 0:
z = z.multiply(x, precision, roundup)
x = x.multiply(x, precision, roundup)
y >>= 1
return z
def compare_to(self, other):
minexp = min(self.exponent, other.exponent)
tempx = self .mantissa << (self .exponent - minexp)
tempy = other.mantissa << (other.exponent - minexp)
if tempx < tempy:
return -1
elif tempx > tempy:
return +1
else:
return 0
DATA = ( # 10 pairs per line
(519432,525806), (632382,518061), (78864,613712), (466580,530130), (780495,510032), (525895,525320), (15991,714883), (960290,502358), (760018,511029), (166800,575487),
(210884,564478), (555151,523163), (681146,515199), (563395,522587), (738250,512126), (923525,503780), (595148,520429), (177108,572629), (750923,511482), (440902,532446),
(881418,505504), (422489,534197), (979858,501616), (685893,514935), (747477,511661), (167214,575367), (234140,559696), (940238,503122), (728969,512609), (232083,560102),
(900971,504694), (688801,514772), (189664,569402), (891022,505104), (445689,531996), (119570,591871), (821453,508118), (371084,539600), (911745,504251), (623655,518600),
(144361,582486), (352442,541775), (420726,534367), (295298,549387), (6530,787777), (468397,529976), (672336,515696), (431861,533289), (84228,610150), (805376,508857),
(444409,532117), (33833,663511), (381850,538396), (402931,536157), (92901,604930), (304825,548004), (731917,512452), (753734,511344), (51894,637373), (151578,580103),
(295075,549421), (303590,548183), (333594,544123), (683952,515042), (60090,628880), (951420,502692), (28335,674991), (714940,513349), (343858,542826), (549279,523586),
(804571,508887), (260653,554881), (291399,549966), (402342,536213), (408889,535550), (40328,652524), (375856,539061), (768907,510590), (165993,575715), (976327,501755),
(898500,504795), (360404,540830), (478714,529095), (694144,514472), (488726,528258), (841380,507226), (328012,544839), (22389,690868), (604053,519852), (329514,544641),
(772965,510390), (492798,527927), (30125,670983), (895603,504906), (450785,531539), (840237,507276), (380711,538522), (63577,625673), (76801,615157), (502694,527123),
(597706,520257), (310484,547206), (944468,502959), (121283,591152), (451131,531507), (566499,522367), (425373,533918), (40240,652665), (39130,654392), (714926,513355),
(469219,529903), (806929,508783), (287970,550487), (92189,605332), (103841,599094), (671839,515725), (452048,531421), (987837,501323), (935192,503321), (88585,607450),
(613883,519216), (144551,582413), (647359,517155), (213902,563816), (184120,570789), (258126,555322), (502546,527130), (407655,535678), (401528,536306), (477490,529193),
(841085,507237), (732831,512408), (833000,507595), (904694,504542), (581435,521348), (455545,531110), (873558,505829), (94916,603796), (720176,513068), (545034,523891),
(246348,557409), (556452,523079), (832015,507634), (173663,573564), (502634,527125), (250732,556611), (569786,522139), (216919,563178), (521815,525623), (92304,605270),
(164446,576167), (753413,511364), (11410,740712), (448845,531712), (925072,503725), (564888,522477), (7062,780812), (641155,517535), (738878,512100), (636204,517828),
(372540,539436), (443162,532237), (571192,522042), (655350,516680), (299741,548735), (581914,521307), (965471,502156), (513441,526277), (808682,508700), (237589,559034),
(543300,524025), (804712,508889), (247511,557192), (543486,524008), (504383,526992), (326529,545039), (792493,509458), (86033,609017), (126554,589005), (579379,521481),
(948026,502823), (404777,535969), (265767,554022), (266876,553840), (46631,643714), (492397,527958), (856106,506581), (795757,509305), (748946,511584), (294694,549480),
(409781,535463), (775887,510253), (543747,523991), (210592,564536), (517119,525990), (520253,525751), (247926,557124), (592141,520626), (346580,542492), (544969,523902),
(506501,526817), (244520,557738), (144745,582349), (69274,620858), (292620,549784), (926027,503687), (736320,512225), (515528,526113), (407549,535688), (848089,506927),
(24141,685711), (9224,757964), (980684,501586), (175259,573121), (489160,528216), (878970,505604), (969546,502002), (525207,525365), (690461,514675), (156510,578551),
(659778,516426), (468739,529945), (765252,510770), (76703,615230), (165151,575959), (29779,671736), (928865,503569), (577538,521605), (927555,503618), (185377,570477),
(974756,501809), (800130,509093), (217016,563153), (365709,540216), (774508,510320), (588716,520851), (631673,518104), (954076,502590), (777828,510161), (990659,501222),
(597799,520254), (786905,509727), (512547,526348), (756449,511212), (869787,505988), (653747,516779), (84623,609900), (839698,507295), (30159,670909), (797275,509234),
(678136,515373), (897144,504851), (989554,501263), (413292,535106), (55297,633667), (788650,509637), (486748,528417), (150724,580377), (56434,632490), (77207,614869),
(588631,520859), (611619,519367), (100006,601055), (528924,525093), (190225,569257), (851155,506789), (682593,515114), (613043,519275), (514673,526183), (877634,505655),
(878905,505602), (1926,914951), (613245,519259), (152481,579816), (841774,507203), (71060,619442), (865335,506175), (90244,606469), (302156,548388), (399059,536557),
(478465,529113), (558601,522925), (69132,620966), (267663,553700), (988276,501310), (378354,538787), (529909,525014), (161733,576968), (758541,511109), (823425,508024),
(149821,580667), (269258,553438), (481152,528891), (120871,591322), (972322,501901), (981350,501567), (676129,515483), (950860,502717), (119000,592114), (392252,537272),
(191618,568919), (946699,502874), (289555,550247), (799322,509139), (703886,513942), (194812,568143), (261823,554685), (203052,566221), (217330,563093), (734748,512313),
(391759,537328), (807052,508777), (564467,522510), (59186,629748), (113447,594545), (518063,525916), (905944,504492), (613922,519213), (439093,532607), (445946,531981),
(230530,560399), (297887,549007), (459029,530797), (403692,536075), (855118,506616), (963127,502245), (841711,507208), (407411,535699), (924729,503735), (914823,504132),
(333725,544101), (176345,572832), (912507,504225), (411273,535308), (259774,555036), (632853,518038), (119723,591801), (163902,576321), (22691,689944), (402427,536212),
(175769,572988), (837260,507402), (603432,519893), (313679,546767), (538165,524394), (549026,523608), (61083,627945), (898345,504798), (992556,501153), (369999,539727),
(32847,665404), (891292,505088), (152715,579732), (824104,507997), (234057,559711), (730507,512532), (960529,502340), (388395,537687), (958170,502437), (57105,631806),
(186025,570311), (993043,501133), (576770,521664), (215319,563513), (927342,503628), (521353,525666), (39563,653705), (752516,511408), (110755,595770), (309749,547305),
(374379,539224), (919184,503952), (990652,501226), (647780,517135), (187177,570017), (168938,574877), (649558,517023), (278126,552016), (162039,576868), (658512,516499),
(498115,527486), (896583,504868), (561170,522740), (747772,511647), (775093,510294), (652081,516882), (724905,512824), (499707,527365), (47388,642755), (646668,517204),
(571700,522007), (180430,571747), (710015,513617), (435522,532941), (98137,602041), (759176,511070), (486124,528467), (526942,525236), (878921,505604), (408313,535602),
(926980,503640), (882353,505459), (566887,522345), (3326,853312), (911981,504248), (416309,534800), (392991,537199), (622829,518651), (148647,581055), (496483,527624),
(666314,516044), (48562,641293), (672618,515684), (443676,532187), (274065,552661), (265386,554079), (347668,542358), (31816,667448), (181575,571446), (961289,502320),
(365689,540214), (987950,501317), (932299,503440), (27388,677243), (746701,511701), (492258,527969), (147823,581323), (57918,630985), (838849,507333), (678038,515375),
(27852,676130), (850241,506828), (818403,508253), (131717,587014), (850216,506834), (904848,504529), (189758,569380), (392845,537217), (470876,529761), (925353,503711),
(285431,550877), (454098,531234), (823910,508003), (318493,546112), (766067,510730), (261277,554775), (421530,534289), (694130,514478), (120439,591498), (213308,563949),
(854063,506662), (365255,540263), (165437,575872), (662240,516281), (289970,550181), (847977,506933), (546083,523816), (413252,535113), (975829,501767), (361540,540701),
(235522,559435), (224643,561577), (736350,512229), (328303,544808), (35022,661330), (307838,547578), (474366,529458), (873755,505819), (73978,617220), (827387,507845),
(670830,515791), (326511,545034), (309909,547285), (400970,536363), (884827,505352), (718307,513175), (28462,674699), (599384,520150), (253565,556111), (284009,551093),
(343403,542876), (446557,531921), (992372,501160), (961601,502308), (696629,514342), (919537,503945), (894709,504944), (892201,505051), (358160,541097), (448503,531745),
(832156,507636), (920045,503924), (926137,503675), (416754,534757), (254422,555966), (92498,605151), (826833,507873), (660716,516371), (689335,514746), (160045,577467),
(814642,508425), (969939,501993), (242856,558047), (76302,615517), (472083,529653), (587101,520964), (99066,601543), (498005,527503), (709800,513624), (708000,513716),
(20171,698134), (285020,550936), (266564,553891), (981563,501557), (846502,506991), (334,1190800), (209268,564829), (9844,752610), (996519,501007), (410059,535426),
(432931,533188), (848012,506929), (966803,502110), (983434,501486), (160700,577267), (504374,526989), (832061,507640), (392825,537214), (443842,532165), (440352,532492),
(745125,511776), (13718,726392), (661753,516312), (70500,619875), (436952,532814), (424724,533973), (21954,692224), (262490,554567), (716622,513264), (907584,504425),
(60086,628882), (837123,507412), (971345,501940), (947162,502855), (139920,584021), (68330,621624), (666452,516038), (731446,512481), (953350,502619), (183157,571042),
(845400,507045), (651548,516910), (20399,697344), (861779,506331), (629771,518229), (801706,509026), (189207,569512), (737501,512168), (719272,513115), (479285,529045),
(136046,585401), (896746,504860), (891735,505067), (684771,514999), (865309,506184), (379066,538702), (503117,527090), (621780,518717), (209518,564775), (677135,515423),
(987500,501340), (197049,567613), (329315,544673), (236756,559196), (357092,541226), (520440,525733), (213471,563911), (956852,502490), (702223,514032), (404943,535955),
(178880,572152), (689477,514734), (691351,514630), (866669,506128), (370561,539656), (739805,512051), (71060,619441), (624861,518534), (261660,554714), (366137,540160),
(166054,575698), (601878,519990), (153445,579501), (279899,551729), (379166,538691), (423209,534125), (675310,515526), (145641,582050), (691353,514627), (917468,504026),
(284778,550976), (81040,612235), (161699,576978), (616394,519057), (767490,510661), (156896,578431), (427408,533714), (254849,555884), (737217,512182), (897133,504851),
(203815,566051), (270822,553189), (135854,585475), (778805,510111), (784373,509847), (305426,547921), (733418,512375), (732087,512448), (540668,524215), (702898,513996),
(628057,518328), (640280,517587), (422405,534204), (10604,746569), (746038,511733), (839808,507293), (457417,530938), (479030,529064), (341758,543090), (620223,518824),
(251661,556451), (561790,522696), (497733,527521), (724201,512863), (489217,528217), (415623,534867), (624610,518548), (847541,506953), (432295,533249), (400391,536421),
(961158,502319), (139173,584284), (421225,534315), (579083,521501), (74274,617000), (701142,514087), (374465,539219), (217814,562985), (358972,540995), (88629,607424),
(288597,550389), (285819,550812), (538400,524385), (809930,508645), (738326,512126), (955461,502535), (163829,576343), (826475,507891), (376488,538987), (102234,599905),
(114650,594002), (52815,636341), (434037,533082), (804744,508880), (98385,601905), (856620,506559), (220057,562517), (844734,507078), (150677,580387), (558697,522917),
(621751,518719), (207067,565321), (135297,585677), (932968,503404), (604456,519822), (579728,521462), (244138,557813), (706487,513800), (711627,513523), (853833,506674),
(497220,527562), (59428,629511), (564845,522486), (623621,518603), (242689,558077), (125091,589591), (363819,540432), (686453,514901), (656813,516594), (489901,528155),
(386380,537905), (542819,524052), (243987,557841), (693412,514514), (488484,528271), (896331,504881), (336730,543721), (728298,512647), (604215,519840), (153729,579413),
(595687,520398), (540360,524240), (245779,557511), (924873,503730), (509628,526577), (528523,525122), (3509,847707), (522756,525555), (895447,504922), (44840,646067),
(45860,644715), (463487,530404), (398164,536654), (894483,504959), (619415,518874), (966306,502129), (990922,501212), (835756,507474), (548881,523618), (453578,531282),
(474993,529410), (80085,612879), (737091,512193), (50789,638638), (979768,501620), (792018,509483), (665001,516122), (86552,608694), (462772,530469), (589233,520821),
(891694,505072), (592605,520594), (209645,564741), (42531,649269), (554376,523226), (803814,508929), (334157,544042), (175836,572970), (868379,506051), (658166,516520),
(278203,551995), (966198,502126), (627162,518387), (296774,549165), (311803,547027), (843797,507118), (702304,514032), (563875,522553), (33103,664910), (191932,568841),
(543514,524006), (506835,526794), (868368,506052), (847025,506971), (678623,515342), (876139,505726), (571997,521984), (598632,520198), (213590,563892), (625404,518497),
(726508,512738), (689426,514738), (332495,544264), (411366,535302), (242546,558110), (315209,546555), (797544,509219), (93889,604371), (858879,506454), (124906,589666),
(449072,531693), (235960,559345), (642403,517454), (720567,513047), (705534,513858), (603692,519870), (488137,528302), (157370,578285), (63515,625730), (666326,516041),
(619226,518883), (443613,532186), (597717,520257), (96225,603069), (86940,608450), (40725,651929), (460976,530625), (268875,553508), (270671,553214), (363254,540500),
(384248,538137), (762889,510892), (377941,538833), (278878,551890), (176615,572755), (860008,506412), (944392,502967), (608395,519571), (225283,561450), (45095,645728),
(333798,544090), (625733,518476), (995584,501037), (506135,526853), (238050,558952), (557943,522972), (530978,524938), (634244,517949), (177168,572616), (85200,609541),
(953043,502630), (523661,525484), (999295,500902), (840803,507246), (961490,502312), (471747,529685), (380705,538523), (911180,504275), (334149,544046), (478992,529065),
(325789,545133), (335884,543826), (426976,533760), (749007,511582), (667067,516000), (607586,519623), (674054,515599), (188534,569675), (565185,522464), (172090,573988),
(87592,608052), (907432,504424), (8912,760841), (928318,503590), (757917,511138), (718693,513153), (315141,546566), (728326,512645), (353492,541647), (638429,517695),
(628892,518280), (877286,505672), (620895,518778), (385878,537959), (423311,534113), (633501,517997), (884833,505360), (883402,505416), (999665,500894), (708395,513697),
(548142,523667), (756491,511205), (987352,501340), (766520,510705), (591775,520647), (833758,507563), (843890,507108), (925551,503698), (74816,616598), (646942,517187),
(354923,541481), (256291,555638), (634470,517942), (930904,503494), (134221,586071), (282663,551304), (986070,501394), (123636,590176), (123678,590164), (481717,528841),
(423076,534137), (866246,506145), (93313,604697), (783632,509880), (317066,546304), (502977,527103), (141272,583545), (71708,618938), (617748,518975), (581190,521362),
(193824,568382), (682368,515131), (352956,541712), (351375,541905), (505362,526909), (905165,504518), (128645,588188), (267143,553787), (158409,577965), (482776,528754),
(628896,518282), (485233,528547), (563606,522574), (111001,595655), (115920,593445), (365510,540237), (959724,502374), (938763,503184), (930044,503520), (970959,501956),
(913658,504176), (68117,621790), (989729,501253), (567697,522288), (820427,508163), (54236,634794), (291557,549938), (124961,589646), (403177,536130), (405421,535899),
(410233,535417), (815111,508403), (213176,563974), (83099,610879), (998588,500934), (513640,526263), (129817,587733), (1820,921851), (287584,550539), (299160,548820),
(860621,506386), (529258,525059), (586297,521017), (953406,502616), (441234,532410), (986217,501386), (781938,509957), (461247,530595), (735424,512277), (146623,581722),
(839838,507288), (510667,526494), (935085,503327), (737523,512167), (303455,548204), (992779,501145), (60240,628739), (939095,503174), (794368,509370), (501825,527189),
(459028,530798), (884641,505363), (512287,526364), (835165,507499), (307723,547590), (160587,577304), (735043,512300), (493289,527887), (110717,595785), (306480,547772),
(318593,546089), (179810,571911), (200531,566799), (314999,546580), (197020,567622), (301465,548487), (237808,559000), (131944,586923), (882527,505449), (468117,530003),
(711319,513541), (156240,578628), (965452,502162), (992756,501148), (437959,532715), (739938,512046), (614249,519196), (391496,537356), (62746,626418), (688215,514806),
(75501,616091), (883573,505412), (558824,522910), (759371,511061), (173913,573489), (891351,505089), (727464,512693), (164833,576051), (812317,508529), (540320,524243),
(698061,514257), (69149,620952), (471673,529694), (159092,577753), (428134,533653), (89997,606608), (711061,513557), (779403,510081), (203327,566155), (798176,509187),
(667688,515963), (636120,517833), (137410,584913), (217615,563034), (556887,523038), (667229,515991), (672276,515708), (325361,545187), (172115,573985), (13846,725685),
)
if __name__ == "__main__":
print(compute())
| 97.71066
| 170
| 0.694166
|
fcfada9396e0996a819c830f8371a6649b9d01f4
| 6,039
|
py
|
Python
|
crafts/bin/craft_mac_app.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | 7
|
2019-05-04T00:51:38.000Z
|
2021-12-10T15:36:31.000Z
|
crafts/bin/craft_mac_app.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | null | null | null |
crafts/bin/craft_mac_app.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | 1
|
2016-07-30T22:38:08.000Z
|
2016-07-30T22:38:08.000Z
|
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
'''
`craft-mac-app` is an experimental build tool that builds a complete mac app without using Xcode.
The steps to build a functioning app were discovered by looking at the raw build logs in an Xcode application.
This is not currently in use and may need updating.
'''
import plistlib
import re
from argparse import ArgumentParser, Namespace
from typing import Any, BinaryIO, Dict, List, Optional
from crafts import CraftConfig, load_craft_config
from pithy.filestatus import file_mtime, file_mtime_or_zero
from pithy.fs import copy_path, make_dirs, path_dir, path_exists, walk_files
from pithy.io import outSL, shell_cmd_str
from pithy.path import norm_path, path_join
from pithy.string import replace_prefix
from pithy.task import run, runO
def main() -> None:
arg_parser = ArgumentParser(description='Build Mac Swift apps using the Swift Package Manager (without Xcode).')
args = arg_parser.parse_args()
conf = load_craft_config()
build(args, conf)
def build(args:Namespace, conf:CraftConfig) -> None:
build_dir = conf.build_dir
sources = conf.sources
for source in sources:
if not path_exists(source, follow=True):
exit(f'craft error: source does not exist: {source!r}')
#sdk_dir = f'{dev_dir}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk' # The versioned SDK just links to the unversioned one.
mode_dir = f'{build_dir}/debug' # TODO: support other modes/configurations.
# Build program.
run(['craft-swift'], exits=1)
# Bundle paths.
bundle_path = f'{mode_dir}/{conf.product_name}.app'
contents_path = bundle_path + '/Contents'
frameworks_path = contents_path + '/Frameworks'
macos_path = contents_path + '/MacOS'
resources_path = contents_path + '/Resources'
# Make directories.
for path in (bundle_path, contents_path, frameworks_path, macos_path, resources_path):
make_dirs(path)
# Copy executable.
exe_src = f'{mode_dir}/{conf.product_name}'
exe_dst = f'{macos_path}/{conf.product_name}'
copy_path(exe_src, exe_dst)
# Compile image assets.
img_deps_path = f'{mode_dir}/image-deps.txt'
img_info_path = f'{mode_dir}/icon.plist'
actool_cmd = [ 'xcrun', 'actool',
'--output-format', 'human-readable-text',
#'--notices',
'--warnings',
'--export-dependency-info', img_deps_path,
'--output-partial-info-plist', img_info_path,
'--app-icon', 'AppIcon',
'--enable-on-demand-resources', 'NO',
'--target-device', 'mac',
'--minimum-deployment-target', conf.target_macOS,
'--platform', 'macosx',
'--product-type', 'com.apple.product-type.application',
'--compile', resources_path,
'images.xcassets']
_ = runO(actool_cmd, exits=True) # output is not helpful.
#img_deps = open(img_deps_path).read()
img_info:Dict[str,Any] = plistlib.load(open(img_info_path, 'rb'))
#errL('img_deps:\n', img_deps, '\n')
#errP(img_info, label='img_info')
# Generate Info.plist.
plist_path = f'{contents_path}/Info.plist'
with open(plist_path, 'wb') as f:
gen_plist(f,
EXECUTABLE_NAME=conf.product_name,
PRODUCT_BUNDLE_IDENTIFIER=conf.product_identifier,
PRODUCT_NAME=conf.product_name,
MACOSX_DEPLOYMENT_TARGET=conf.target_macOS,
copyright=conf.copyright,
principle_class='NSApplication',
**img_info)
# Copy frameworks.
# Copy resources.
for res_root, dst_root in conf.resources.items():
build_dst_root = path_join(build_dir, dst_root)
for res_path in walk_files(res_root):
dst_path = norm_path(replace_prefix(res_path, prefix=res_root, replacement=build_dst_root))
res_mtime = file_mtime(res_path, follow=True)
dst_mtime = file_mtime_or_zero(dst_path, follow=True)
if res_mtime == dst_mtime: continue
outSL(res_path, '->', dst_path)
if res_mtime < dst_mtime: exit(f'resource build copy was subsequently modified: {dst_path}')
make_dirs(path_dir(dst_path))
copy_path(res_path, dst_path)
# Touch the bundle.
run(['touch', '-c', bundle_path], exits=True)
# TODO: register with launch services?
def gen_plist(dst_file:BinaryIO, EXECUTABLE_NAME:Optional[str], PRODUCT_BUNDLE_IDENTIFIER:Optional[str],
PRODUCT_NAME:Optional[str], MACOSX_DEPLOYMENT_TARGET:str, copyright:str, principle_class:str, **items:str) -> None:
d = {
'BuildMachineOSBuild': '17A362a', # TODO.
'CFBundleDevelopmentRegion': 'en',
'CFBundleExecutable': EXECUTABLE_NAME,
'CFBundleIdentifier': PRODUCT_BUNDLE_IDENTIFIER,
'CFBundleInfoDictionaryVersion': '6.0',
'CFBundleName': PRODUCT_NAME,
'CFBundlePackageType': 'APPL',
'CFBundleShortVersionString': '1.0', # TODO.
'CFBundleSignature': '????',
'CFBundleSupportedPlatforms': ['MacOSX'],
'CFBundleVersion': '1', # TODO.
'DTCompiler': 'com.apple.compilers.llvm.clang.1_0', # TODO.
'DTPlatformBuild': '9A235', # TODO.
'DTPlatformVersion': 'GM', # TODO.
'DTSDKBuild': '17A360', # TODO.
'DTSDKName': 'macosx10.15', # TODO.
'DTXcode': '0900', # TODO.
'DTXcodeBuild': '9A235', # TODO.
'LSMinimumSystemVersion': MACOSX_DEPLOYMENT_TARGET,
'NSHumanReadableCopyright': copyright,
'NSPrincipalClass': principle_class,
**items
}
plistlib.dump(d, dst_file)
def detect_swift_imports(swift_source_paths:List[str]) -> List[str]:
# Prior to swift 5 it was necessary to copy swift libs into the app.
# This is not currently used but we are hanging onto it for now.
egrep_cmd = ['egrep', '--no-filename', '--only-matching', r'\s*import .*'] + swift_source_paths
print(shell_cmd_str(egrep_cmd))
swift_import_lines = list(filter(None, runO(egrep_cmd).split('\n'))) # TODO: use run_gen.
return sorted(set(trim_import_statement(line) for line in swift_import_lines))
def trim_import_statement(statement:str) -> str:
m = re.match(r'\s*import (\w+)', statement)
if not m: raise ValueError(f'egrep found bad import line: {statement!r}')
return m[1]
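# Illustrative note (not part of the original tool): given a source line such
# as '  import AppKit', trim_import_statement returns 'AppKit', so
# detect_swift_imports yields a sorted, de-duplicated list of module names,
# e.g. ['AppKit', 'Foundation'] (module names here are hypothetical).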
if __name__ == '__main__': main()
| 36.823171
| 132
| 0.714522
|
f2357a4e4a34a2c4d4652e17052f3088c074b4a6
| 20,278
|
py
|
Python
|
src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py
|
timgates42/grpc
|
f78966eb522fe37bff65e609a7847cf473483291
|
[
"Apache-2.0"
] | 2
|
2019-08-16T08:14:27.000Z
|
2020-02-08T02:16:15.000Z
|
src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py
|
timgates42/grpc
|
f78966eb522fe37bff65e609a7847cf473483291
|
[
"Apache-2.0"
] | null | null | null |
src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py
|
timgates42/grpc
|
f78966eb522fe37bff65e609a7847cf473483291
|
[
"Apache-2.0"
] | 2
|
2019-08-16T07:52:52.000Z
|
2020-08-14T13:39:16.000Z
|
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of grpc_channelz.v1.channelz."""
import unittest
from concurrent import futures
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2_grpc
except ImportError:
from grpc_channelz.v1 import channelz
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
_SUCCESSFUL_UNARY_UNARY = '/test/SuccessfulUnaryUnary'
_FAILED_UNARY_UNARY = '/test/FailedUnaryUnary'
_SUCCESSFUL_STREAM_STREAM = '/test/SuccessfulStreamStream'
_REQUEST = b'\x00\x00\x00'
_RESPONSE = b'\x01\x01\x01'
_DISABLE_REUSE_PORT = (('grpc.so_reuseport', 0),)
_ENABLE_CHANNELZ = (('grpc.enable_channelz', 1),)
_DISABLE_CHANNELZ = (('grpc.enable_channelz', 0),)
def _successful_unary_unary(request, servicer_context):
return _RESPONSE
def _failed_unary_unary(request, servicer_context):
servicer_context.set_code(grpc.StatusCode.INTERNAL)
servicer_context.set_details("Channelz Test Intended Failure")
def _successful_stream_stream(request_iterator, servicer_context):
for _ in request_iterator:
yield _RESPONSE
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _SUCCESSFUL_UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_successful_unary_unary)
elif handler_call_details.method == _FAILED_UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_failed_unary_unary)
elif handler_call_details.method == _SUCCESSFUL_STREAM_STREAM:
return grpc.stream_stream_rpc_method_handler(
_successful_stream_stream)
else:
return None
class _ChannelServerPair(object):
def __init__(self):
# Server will enable channelz service
self.server = grpc.server(
futures.ThreadPoolExecutor(max_workers=3),
options=_DISABLE_REUSE_PORT + _ENABLE_CHANNELZ)
port = self.server.add_insecure_port('[::]:0')
self.server.add_generic_rpc_handlers((_GenericHandler(),))
self.server.start()
# Channel will enable channelz service...
self.channel = grpc.insecure_channel('localhost:%d' % port,
_ENABLE_CHANNELZ)
def _generate_channel_server_pairs(n):
return [_ChannelServerPair() for i in range(n)]
def _close_channel_server_pairs(pairs):
for pair in pairs:
pair.server.stop(None)
pair.channel.close()
class ChannelzServicerTest(unittest.TestCase):
def _send_successful_unary_unary(self, idx):
_, r = self._pairs[idx].channel.unary_unary(
_SUCCESSFUL_UNARY_UNARY).with_call(_REQUEST)
self.assertEqual(r.code(), grpc.StatusCode.OK)
def _send_failed_unary_unary(self, idx):
try:
self._pairs[idx].channel.unary_unary(_FAILED_UNARY_UNARY).with_call(
_REQUEST)
except grpc.RpcError:
return
else:
self.fail("This call supposed to fail")
def _send_successful_stream_stream(self, idx):
response_iterator = self._pairs[idx].channel.stream_stream(
_SUCCESSFUL_STREAM_STREAM).__call__(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
cnt = 0
for _ in response_iterator:
cnt += 1
self.assertEqual(cnt, test_constants.STREAM_LENGTH)
def _get_channel_id(self, idx):
"""Channel id may not be consecutive"""
resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
self.assertGreater(len(resp.channel), idx)
return resp.channel[idx].ref.channel_id
def setUp(self):
self._pairs = []
        # This server is only used for fetching Channelz info.
        # It should not itself enable Channelz.
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=3),
options=_DISABLE_REUSE_PORT + _DISABLE_CHANNELZ)
port = self._server.add_insecure_port('[::]:0')
channelz.add_channelz_servicer(self._server)
self._server.start()
# This channel is used to fetch Channelz info only
# Channelz should not be enabled
self._channel = grpc.insecure_channel('localhost:%d' % port,
_DISABLE_CHANNELZ)
self._channelz_stub = channelz_pb2_grpc.ChannelzStub(self._channel)
def tearDown(self):
self._server.stop(None)
self._channel.close()
_close_channel_server_pairs(self._pairs)
def test_get_top_channels_basic(self):
self._pairs = _generate_channel_server_pairs(1)
resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
self.assertEqual(len(resp.channel), 1)
self.assertEqual(resp.end, True)
def test_get_top_channels_high_start_id(self):
self._pairs = _generate_channel_server_pairs(1)
resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=10000))
self.assertEqual(len(resp.channel), 0)
self.assertEqual(resp.end, True)
def test_successful_request(self):
self._pairs = _generate_channel_server_pairs(1)
self._send_successful_unary_unary(0)
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
self.assertEqual(resp.channel.data.calls_started, 1)
self.assertEqual(resp.channel.data.calls_succeeded, 1)
self.assertEqual(resp.channel.data.calls_failed, 0)
def test_failed_request(self):
self._pairs = _generate_channel_server_pairs(1)
self._send_failed_unary_unary(0)
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
self.assertEqual(resp.channel.data.calls_started, 1)
self.assertEqual(resp.channel.data.calls_succeeded, 0)
self.assertEqual(resp.channel.data.calls_failed, 1)
def test_many_requests(self):
self._pairs = _generate_channel_server_pairs(1)
k_success = 7
k_failed = 9
for i in range(k_success):
self._send_successful_unary_unary(0)
for i in range(k_failed):
self._send_failed_unary_unary(0)
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
self.assertEqual(resp.channel.data.calls_started, k_success + k_failed)
self.assertEqual(resp.channel.data.calls_succeeded, k_success)
self.assertEqual(resp.channel.data.calls_failed, k_failed)
def test_many_channel(self):
k_channels = 4
self._pairs = _generate_channel_server_pairs(k_channels)
resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
self.assertEqual(len(resp.channel), k_channels)
def test_many_requests_many_channel(self):
k_channels = 4
self._pairs = _generate_channel_server_pairs(k_channels)
k_success = 11
k_failed = 13
for i in range(k_success):
self._send_successful_unary_unary(0)
self._send_successful_unary_unary(2)
for i in range(k_failed):
self._send_failed_unary_unary(1)
self._send_failed_unary_unary(2)
# The first channel saw only successes
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
self.assertEqual(resp.channel.data.calls_started, k_success)
self.assertEqual(resp.channel.data.calls_succeeded, k_success)
self.assertEqual(resp.channel.data.calls_failed, 0)
# The second channel saw only failures
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(1)))
self.assertEqual(resp.channel.data.calls_started, k_failed)
self.assertEqual(resp.channel.data.calls_succeeded, 0)
self.assertEqual(resp.channel.data.calls_failed, k_failed)
# The third channel saw both successes and failures
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(2)))
self.assertEqual(resp.channel.data.calls_started, k_success + k_failed)
self.assertEqual(resp.channel.data.calls_succeeded, k_success)
self.assertEqual(resp.channel.data.calls_failed, k_failed)
# The fourth channel saw nothing
resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(3)))
self.assertEqual(resp.channel.data.calls_started, 0)
self.assertEqual(resp.channel.data.calls_succeeded, 0)
self.assertEqual(resp.channel.data.calls_failed, 0)
def test_many_subchannels(self):
k_channels = 4
self._pairs = _generate_channel_server_pairs(k_channels)
k_success = 17
k_failed = 19
for i in range(k_success):
self._send_successful_unary_unary(0)
self._send_successful_unary_unary(2)
for i in range(k_failed):
self._send_failed_unary_unary(1)
self._send_failed_unary_unary(2)
gtc_resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
self.assertEqual(len(gtc_resp.channel), k_channels)
for i in range(k_channels):
# If no call performed in the channel, there shouldn't be any subchannel
if gtc_resp.channel[i].data.calls_started == 0:
self.assertEqual(len(gtc_resp.channel[i].subchannel_ref), 0)
continue
# Otherwise, the subchannel should exist
self.assertGreater(len(gtc_resp.channel[i].subchannel_ref), 0)
gsc_resp = self._channelz_stub.GetSubchannel(
channelz_pb2.GetSubchannelRequest(
subchannel_id=gtc_resp.channel[i].subchannel_ref[
0].subchannel_id))
self.assertEqual(gtc_resp.channel[i].data.calls_started,
gsc_resp.subchannel.data.calls_started)
self.assertEqual(gtc_resp.channel[i].data.calls_succeeded,
gsc_resp.subchannel.data.calls_succeeded)
self.assertEqual(gtc_resp.channel[i].data.calls_failed,
gsc_resp.subchannel.data.calls_failed)
def test_server_basic(self):
self._pairs = _generate_channel_server_pairs(1)
resp = self._channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
self.assertEqual(len(resp.server), 1)
def test_get_one_server(self):
self._pairs = _generate_channel_server_pairs(1)
gss_resp = self._channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
self.assertEqual(len(gss_resp.server), 1)
gs_resp = self._channelz_stub.GetServer(
channelz_pb2.GetServerRequest(
server_id=gss_resp.server[0].ref.server_id))
self.assertEqual(gss_resp.server[0].ref.server_id,
gs_resp.server.ref.server_id)
def test_server_call(self):
self._pairs = _generate_channel_server_pairs(1)
k_success = 23
k_failed = 29
for i in range(k_success):
self._send_successful_unary_unary(0)
for i in range(k_failed):
self._send_failed_unary_unary(0)
resp = self._channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
self.assertEqual(len(resp.server), 1)
self.assertEqual(resp.server[0].data.calls_started,
k_success + k_failed)
self.assertEqual(resp.server[0].data.calls_succeeded, k_success)
self.assertEqual(resp.server[0].data.calls_failed, k_failed)
def test_many_subchannels_and_sockets(self):
k_channels = 4
self._pairs = _generate_channel_server_pairs(k_channels)
k_success = 3
k_failed = 5
for i in range(k_success):
self._send_successful_unary_unary(0)
self._send_successful_unary_unary(2)
for i in range(k_failed):
self._send_failed_unary_unary(1)
self._send_failed_unary_unary(2)
gtc_resp = self._channelz_stub.GetTopChannels(
channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
self.assertEqual(len(gtc_resp.channel), k_channels)
for i in range(k_channels):
# If no call performed in the channel, there shouldn't be any subchannel
if gtc_resp.channel[i].data.calls_started == 0:
self.assertEqual(len(gtc_resp.channel[i].subchannel_ref), 0)
continue
# Otherwise, the subchannel should exist
self.assertGreater(len(gtc_resp.channel[i].subchannel_ref), 0)
gsc_resp = self._channelz_stub.GetSubchannel(
channelz_pb2.GetSubchannelRequest(
subchannel_id=gtc_resp.channel[i].subchannel_ref[
0].subchannel_id))
self.assertEqual(len(gsc_resp.subchannel.socket_ref), 1)
gs_resp = self._channelz_stub.GetSocket(
channelz_pb2.GetSocketRequest(
socket_id=gsc_resp.subchannel.socket_ref[0].socket_id))
self.assertEqual(gsc_resp.subchannel.data.calls_started,
gs_resp.socket.data.streams_started)
self.assertEqual(gsc_resp.subchannel.data.calls_started,
gs_resp.socket.data.streams_succeeded)
# Calls started == messages sent, only valid for unary calls
self.assertEqual(gsc_resp.subchannel.data.calls_started,
gs_resp.socket.data.messages_sent)
# Only receive responses when the RPC was successful
self.assertEqual(gsc_resp.subchannel.data.calls_succeeded,
gs_resp.socket.data.messages_received)
def test_streaming_rpc(self):
self._pairs = _generate_channel_server_pairs(1)
# In C++, the argument for _send_successful_stream_stream is message length.
# Here the argument is still channel idx, to be consistent with the other two.
self._send_successful_stream_stream(0)
gc_resp = self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
self.assertEqual(gc_resp.channel.data.calls_started, 1)
self.assertEqual(gc_resp.channel.data.calls_succeeded, 1)
self.assertEqual(gc_resp.channel.data.calls_failed, 0)
# Subchannel exists
self.assertGreater(len(gc_resp.channel.subchannel_ref), 0)
gsc_resp = self._channelz_stub.GetSubchannel(
channelz_pb2.GetSubchannelRequest(
subchannel_id=gc_resp.channel.subchannel_ref[0].subchannel_id))
self.assertEqual(gsc_resp.subchannel.data.calls_started, 1)
self.assertEqual(gsc_resp.subchannel.data.calls_succeeded, 1)
self.assertEqual(gsc_resp.subchannel.data.calls_failed, 0)
# Socket exists
self.assertEqual(len(gsc_resp.subchannel.socket_ref), 1)
gs_resp = self._channelz_stub.GetSocket(
channelz_pb2.GetSocketRequest(
socket_id=gsc_resp.subchannel.socket_ref[0].socket_id))
self.assertEqual(gs_resp.socket.data.streams_started, 1)
self.assertEqual(gs_resp.socket.data.streams_succeeded, 1)
self.assertEqual(gs_resp.socket.data.streams_failed, 0)
self.assertEqual(gs_resp.socket.data.messages_sent,
test_constants.STREAM_LENGTH)
self.assertEqual(gs_resp.socket.data.messages_received,
test_constants.STREAM_LENGTH)
def test_server_sockets(self):
self._pairs = _generate_channel_server_pairs(1)
self._send_successful_unary_unary(0)
self._send_failed_unary_unary(0)
gs_resp = self._channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
self.assertEqual(len(gs_resp.server), 1)
self.assertEqual(gs_resp.server[0].data.calls_started, 2)
self.assertEqual(gs_resp.server[0].data.calls_succeeded, 1)
self.assertEqual(gs_resp.server[0].data.calls_failed, 1)
gss_resp = self._channelz_stub.GetServerSockets(
channelz_pb2.GetServerSocketsRequest(
server_id=gs_resp.server[0].ref.server_id, start_socket_id=0))
        # If the RPC call fails, it raises a grpc.RpcError,
        # so the test is considered passed if no exception is raised.
def test_server_listen_sockets(self):
self._pairs = _generate_channel_server_pairs(1)
gss_resp = self._channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
self.assertEqual(len(gss_resp.server), 1)
self.assertEqual(len(gss_resp.server[0].listen_socket), 1)
gs_resp = self._channelz_stub.GetSocket(
channelz_pb2.GetSocketRequest(
socket_id=gss_resp.server[0].listen_socket[0].socket_id))
        # If the RPC call fails, it raises a grpc.RpcError,
        # so the test is considered passed if no exception is raised.
def test_invalid_query_get_server(self):
try:
self._channelz_stub.GetServer(
channelz_pb2.GetServerRequest(server_id=10000))
except BaseException as e:
self.assertIn('StatusCode.NOT_FOUND', str(e))
else:
self.fail('Invalid query not detected')
def test_invalid_query_get_channel(self):
try:
self._channelz_stub.GetChannel(
channelz_pb2.GetChannelRequest(channel_id=10000))
except BaseException as e:
self.assertIn('StatusCode.NOT_FOUND', str(e))
else:
self.fail('Invalid query not detected')
def test_invalid_query_get_subchannel(self):
try:
self._channelz_stub.GetSubchannel(
channelz_pb2.GetSubchannelRequest(subchannel_id=10000))
except BaseException as e:
self.assertIn('StatusCode.NOT_FOUND', str(e))
else:
self.fail('Invalid query not detected')
def test_invalid_query_get_socket(self):
try:
self._channelz_stub.GetSocket(
channelz_pb2.GetSocketRequest(socket_id=10000))
except BaseException as e:
self.assertIn('StatusCode.NOT_FOUND', str(e))
else:
self.fail('Invalid query not detected')
def test_invalid_query_get_server_sockets(self):
try:
self._channelz_stub.GetServerSockets(
channelz_pb2.GetServerSocketsRequest(
server_id=10000,
start_socket_id=0,
))
except BaseException as e:
self.assertIn('StatusCode.NOT_FOUND', str(e))
else:
self.fail('Invalid query not detected')
if __name__ == '__main__':
unittest.main(verbosity=2)
| 42.60084
| 86
| 0.677532
|
5ffd64db0a180273e0e2fce56f5468babe83a663
| 956
|
py
|
Python
|
tests/constraints/test_traversal.py
|
bartfrenk/constraints
|
ef5f865307cc58e416f464882f74153614775c2f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/constraints/test_traversal.py
|
bartfrenk/constraints
|
ef5f865307cc58e416f464882f74153614775c2f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/constraints/test_traversal.py
|
bartfrenk/constraints
|
ef5f865307cc58e416f464882f74153614775c2f
|
[
"BSD-3-Clause"
] | null | null | null |
import constraints.traversal as sut
def _adj_fn(graph):
def fn(i):
return graph[i]
return fn
gamma = {0: [1, 2, 6], 1: [], 2: [1, 4], 3: [], 4: [3], 5: [1, 3], 6: [2]}
cycle = {0: [1], 1: [2], 2: [3], 3: [0]}
class TestMultiPaths(object):
# pylint: disable=no-self-use
def test_correct_result_on_dag(self):
actual = sut.multi_paths(_adj_fn(gamma), 0)
assert actual == {1: [[0, 1], [0, 2, 1]], 2: [[0, 2], [0, 6, 2]]}
def test_correct_result_on_cycle(self):
actual = sut.multi_paths(_adj_fn(cycle), 0)
assert actual == {}
class TestBFS(object):
# pylint: disable=no-self-use
def test_correct_result_on_dag(self):
actual = sut.bfs(_adj_fn(gamma), 0)
assert actual == {1: [0, 2], 2: [0, 6], 3: [4], 4: [2], 6: [0]}
def test_correct_result_on_cycle(self):
actual = sut.bfs(_adj_fn(cycle), 0)
assert actual == {0: [3], 1: [0], 2: [1], 3: [2]}
| 25.157895
| 74
| 0.549163
|
ab314767d64f2899bd20cf0d4e2c4e1c850ebe4b
| 2,480
|
py
|
Python
|
scikit-learn-weighted_kde/examples/linear_model/plot_logistic_multinomial.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
scikit-learn-weighted_kde/examples/linear_model/plot_logistic_multinomial.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
scikit-learn-weighted_kde/examples/linear_model/plot_logistic_multinomial.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
"""
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| 34.929577
| 74
| 0.603226
|
2458ab55fecb56acc09174d42196b5882fbb94c3
| 1,593
|
py
|
Python
|
staging/versions/f1f0b96dd139_.py
|
farbodab/flatteningthecurve
|
692fd9c8d78355e1208ff85a2cd1038da11c392f
|
[
"MIT"
] | 1
|
2020-03-24T23:46:29.000Z
|
2020-03-24T23:46:29.000Z
|
staging/versions/f1f0b96dd139_.py
|
farbodab/flatteningthecurve
|
692fd9c8d78355e1208ff85a2cd1038da11c392f
|
[
"MIT"
] | 13
|
2021-02-08T20:51:14.000Z
|
2022-03-12T00:43:30.000Z
|
staging/versions/f1f0b96dd139_.py
|
farbodab/flatteningthecurve
|
692fd9c8d78355e1208ff85a2cd1038da11c392f
|
[
"MIT"
] | 3
|
2020-06-09T20:24:29.000Z
|
2020-06-09T20:26:16.000Z
|
"""empty message
Revision ID: f1f0b96dd139
Revises: 110a27b89211
Create Date: 2020-05-25 00:24:47.226707
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1f0b96dd139'
down_revision = '110a27b89211'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('members',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('team', sa.String(), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('first_name', sa.String(), nullable=True),
sa.Column('last_name', sa.String(), nullable=True),
sa.Column('education', sa.String(), nullable=True),
sa.Column('affiliation', sa.String(), nullable=True),
sa.Column('role', sa.String(), nullable=True),
sa.Column('team_status', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_members_first_name'), 'members', ['first_name'], unique=False)
op.create_index(op.f('ix_members_last_name'), 'members', ['last_name'], unique=False)
op.create_index(op.f('ix_members_team'), 'members', ['team'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_members_team'), table_name='members')
op.drop_index(op.f('ix_members_last_name'), table_name='members')
op.drop_index(op.f('ix_members_first_name'), table_name='members')
op.drop_table('members')
# ### end Alembic commands ###
| 34.630435
| 91
| 0.684871
|
bd6b5501f653b268cb4065656bea66968103f4e7
| 160
|
py
|
Python
|
project4/network/forms.py
|
VincVader/Social-Network
|
91751e199c904a38e03470c1ea0386b2d04851d8
|
[
"MIT"
] | null | null | null |
project4/network/forms.py
|
VincVader/Social-Network
|
91751e199c904a38e03470c1ea0386b2d04851d8
|
[
"MIT"
] | null | null | null |
project4/network/forms.py
|
VincVader/Social-Network
|
91751e199c904a38e03470c1ea0386b2d04851d8
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Like, Post, User
class NewPost(forms.ModelForm):
class Meta:
model = Post
fields = {'content'}
| 22.857143
| 36
| 0.6625
|
067c7fd4f34782debfe0ce32825863b037f4f73f
| 7,052
|
py
|
Python
|
ard/util.py
|
ms860309/AutomaticReactionDiscovery
|
ea009e1066058afd8a6a5d317d28d79016d8c93e
|
[
"MIT"
] | 1
|
2020-07-12T11:42:49.000Z
|
2020-07-12T11:42:49.000Z
|
ard/util.py
|
ms860309/AutomaticReactionDiscovery
|
ea009e1066058afd8a6a5d317d28d79016d8c93e
|
[
"MIT"
] | null | null | null |
ard/util.py
|
ms860309/AutomaticReactionDiscovery
|
ea009e1066058afd8a6a5d317d28d79016d8c93e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides utility functions and classes.
"""
from __future__ import division
import bisect
from functools import wraps
import logging
import os
import shutil
import time
import numpy as np
from quantum import Gaussian, QChem, NWChem
###############################################################################
def initializeLog(level, logfile, logname=None):
"""
Configure a logger. `level` is an integer parameter specifying how much
information is displayed in the log file. The levels correspond to those of
the :data:`logging` module.
"""
# Create logger
logger = logging.getLogger(logname)
logger.propagate = False
logger.setLevel(level)
logging.addLevelName(logging.CRITICAL, 'CRITICAL: ')
logging.addLevelName(logging.ERROR, 'ERROR: ')
logging.addLevelName(logging.WARNING, 'WARNING: ')
logging.addLevelName(logging.INFO, '')
logging.addLevelName(logging.DEBUG, '')
# Create formatter
formatter = logging.Formatter('%(levelname)s%(message)s')
# Create file handler
if os.path.exists(logfile):
os.remove(logfile)
fh = logging.FileHandler(filename=logfile)
fh.setLevel(min(logging.DEBUG, level))
fh.setFormatter(formatter)
# Remove old handlers
while logger.handlers:
logger.removeHandler(logger.handlers[0])
# Add file handler
logger.addHandler(fh)
return logger
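# Illustrative usage (not part of the original module); the file name and
# logger name below are hypothetical:
#     logger = initializeLog(logging.INFO, 'ard.log', logname='ard')
#     logger.info('ARD run started')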
class Copier(object):
"""
    Function object that creates a picklable function, `fn`, with constant
    values for some of its arguments, set as `self.args` and
`self.kwargs`. This enables using `fn` in conjunction with `map` if the
sequence being mapped to the function does not correspond to the first
function argument and if the function has multiple arguments. `var_kw` has
to specify the names of variable keyword arguments in a list in the order
corresponding to any keyword arguments in `var_args`.
"""
def __init__(self, fn, *const_args, **const_kwargs):
self.fn = fn
self.args = const_args
self.kwargs = const_kwargs
self.kw = self.kwargs.pop('var_kw', None)
def __call__(self, *var_args):
if self.kw is not None:
num_var_kw = len(self.kw)
args = self.args + var_args[:-num_var_kw]
var_kwargs = {key: val for (key, val) in zip(self.kw, var_args[-num_var_kw:])}
kwargs = dict(self.kwargs, **var_kwargs)
return self.fn(*args, **kwargs)
args = self.args + var_args
return self.fn(*args, **self.kwargs)
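def _copier_example():
    """
    Illustrative usage sketch (not part of the original module): freeze the
    first argument of a hypothetical two-argument function so the remaining
    argument can be mapped over a sequence.
    """
    def scale(factor, value):
        return factor * value
    return list(map(Copier(scale, 2), [1, 2, 3]))  # -> [2, 4, 6]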
def makeOutputSubdirectory(output_dir, folder):
"""
Create a subdirectory `folder` in the output directory. If the folder
already exists, its contents are deleted. Returns the path to the
subdirectory.
"""
subdir = os.path.join(output_dir, folder)
if os.path.exists(subdir):
shutil.rmtree(subdir)
os.mkdir(subdir)
return subdir
def makeReactionSubdirectory(output_dir, folder):
"""
    Create a subdirectory `folder` in the output directory without deleting an
    existing directory. Returns the path to the subdirectory.
"""
subdir = os.path.join(output_dir, folder)
if not os.path.exists(subdir):
os.mkdir(subdir)
return subdir
def timeFn(fn):
@wraps(fn)
def fnWithTime(*args, **kwargs):
start_time = time.time()
result = fn(*args, **kwargs)
final_time = time.time()
# Has to be used for a method of a class that has a `logger` attribute
args[0].logger.info('{} completed in {:.2f} s'.format(fn.__name__, final_time - start_time))
return result
return fnWithTime
def logStartAndFinish(fn):
@wraps(fn)
def fnWrappedWithLog(*args, **kwargs):
# Has to be used for a method of a class that has a `logger` attribute
args[0].logger.info('\n----------------------------------------------------------------------')
args[0].logger.info('{} initiated on {}\n'.format(fn.__name__, time.asctime()))
result = fn(*args, **kwargs)
args[0].logger.info('{} terminated on {}'.format(fn.__name__, time.asctime()))
args[0].logger.info('----------------------------------------------------------------------\n')
return result
return fnWrappedWithLog
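# Illustrative usage sketch (hypothetical class, not in the original source):
# both decorators expect to wrap a method whose first argument exposes a
# `logger` attribute, e.g.
#
#     class Job(object):
#         def __init__(self, logger):
#             self.logger = logger
#
#         @timeFn
#         def run(self):
#             pass  # do work; the elapsed time is logged on return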
def assignQclass(qprog):
"""
Choose the appropriate quantum class based on the `qprog` string.
"""
if qprog == 'gau':
Qclass = Gaussian
elif qprog == 'qchem':
Qclass = QChem
elif qprog == 'nwchem':
Qclass = NWChem
else:
raise Exception('Invalid quantum software')
return Qclass
def findClosest(a, x):
"""
Returns index of value closest to `x` in sorted sequence `a`.
"""
idx = bisect.bisect_left(a, x)
    # Clamp to a valid index at the boundaries.
    if idx == 0:
        return 0
    if idx == len(a):
        return len(a) - 1
if a[idx] - x < x - a[idx - 1]:
return idx
else:
return idx - 1
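# Example (illustrative, not in the original source): with a = [1, 4, 9, 16]
# and x = 5, bisect_left gives idx = 2; since 9 - 5 is not smaller than 5 - 4,
# findClosest(a, 5) returns idx - 1 = 1, the index of the closest value 4.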
def getDistMat(coords):
"""
Calculate and return distance matrix form given an N x 3 array of
Cartesian coordinates. The matrix is N x N. Only the upper diagonal
elements contain the distances. All other elements are set to 0.
"""
coords = coords.reshape(np.size(coords) // 3, 3)
x = coords[:, 0]
y = coords[:, 1]
z = coords[:, 2]
dx = x[..., np.newaxis] - x[np.newaxis, ...]
dy = y[..., np.newaxis] - y[np.newaxis, ...]
dz = z[..., np.newaxis] - z[np.newaxis, ...]
return np.triu((np.array([dx, dy, dz]) ** 2).sum(axis=0) ** 0.5)
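# Example (illustrative, not in the original source): for a point at the
# origin and one at (3, 4, 0),
#     getDistMat(np.array([0.0, 0.0, 0.0, 3.0, 4.0, 0.0]))
# returns [[0., 5.], [0., 0.]] -- only the upper triangle holds the distance.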
def rotationMatrix(angles, axis=None):
"""
Calculates and returns the rotation matrix defined by three angles of
rotation about the x, y, and z axes or one angle of rotation about a
given axis.
"""
if axis is None:
Rx = np.array(
[[1.0, 0.0, 0.0],
[0.0, np.cos(angles[0]), -np.sin(angles[0])],
[0.0, np.sin(angles[0]), np.cos(angles[0])]]
)
Ry = np.array(
[[np.cos(angles[1]), 0.0, np.sin(angles[1])],
[0.0, 1.0, 0.0],
[-np.sin(angles[1]), 0.0, np.cos(angles[1])]]
)
Rz = np.array(
[[np.cos(angles[2]), -np.sin(angles[2]), 0.0],
[np.sin(angles[2]), np.cos(angles[2]), 0.0],
[0.0, 0.0, 1.0]]
)
return Rx.dot(Ry).dot(Rz)
else:
axis = axis/np.sqrt(axis.dot(axis))
x, y, z = axis[0], axis[1], axis[2]
angle = angles
R = np.array(
[[np.cos(angle) + x ** 2 * (1 - np.cos(angle)),
x * y * (1 - np.cos(angle)) - z * np.sin(angle),
x * z * (1 - np.cos(angle))+y * np.sin(angle)],
[y * x * (1 - np.cos(angle))+z * np.sin(angle),
np.cos(angle) + y ** 2 * (1 - np.cos(angle)),
y * z * (1 - np.cos(angle)) - x * np.sin(angle)],
[z * x * (1 - np.cos(angle)) - y * np.sin(angle),
z * y * (1 - np.cos(angle)) + x * np.sin(angle),
np.cos(angle) + z ** 2 * (1 - np.cos(angle))]]
)
return R
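# Example (illustrative, not in the original source): a 90 degree rotation
# about the z axis maps the x unit vector onto the y unit vector,
#     R = rotationMatrix(np.pi / 2, axis=np.array([0.0, 0.0, 1.0]))
#     R.dot(np.array([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]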
| 33.107981
| 103
| 0.574022
|
ce5154ca3447395dd05119db64d53732e998c017
| 1,539
|
py
|
Python
|
domain_adaptation/models/M64/VAE_64.py
|
DexiongYung/robustnav_AE
|
f2b1b5bb8780e4e6ae5f81c127b7589cfc949801
|
[
"MIT"
] | null | null | null |
domain_adaptation/models/M64/VAE_64.py
|
DexiongYung/robustnav_AE
|
f2b1b5bb8780e4e6ae5f81c127b7589cfc949801
|
[
"MIT"
] | null | null | null |
domain_adaptation/models/M64/VAE_64.py
|
DexiongYung/robustnav_AE
|
f2b1b5bb8780e4e6ae5f81c127b7589cfc949801
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from domain_adaptation.models.common import *
from domain_adaptation.models.M64.M64 import M64
class Encoder(nn.Module):
def __init__(self, content_latent_size = 32, input_channel = 3, flatten_size = 1024):
super(Encoder, self).__init__()
self.encoder = carracing_encoder(input_channel)
self.linear_mu = nn.Linear(flatten_size, content_latent_size)
self.linear_sigma = nn.Linear(flatten_size, content_latent_size)
def forward(self, x):
x1 = self.encoder(x)
x_flatten = x1.flatten(start_dim=1)
mu = self.linear_mu(x_flatten)
sigma = self.linear_sigma(x_flatten)
latent = reparameterize(mu, sigma)
return mu, sigma, latent
class VAE_64(M64):
def __init__(self, content_latent_size = 32, input_channel = 3, flatten_size = 1024, **kwargs):
super(VAE_64, self).__init__(content_latent_size, input_channel, flatten_size)
self.encoder = Encoder(content_latent_size, input_channel, flatten_size)
self.decoder_fc1 = nn.Linear(content_latent_size, flatten_size)
self.decoder = carracing_decoder(flatten_size)
def forward(self, x, return_latent: bool = False):
mu, sigma, latent = self.encoder(x)
latent_1 = self.decoder_fc1(latent)
flatten_x = latent_1.unsqueeze(-1).unsqueeze(-1)
recon_x = self.decoder(flatten_x)
if return_latent:
return mu, sigma, recon_x, latent
else:
return mu, sigma, recon_x
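# Note (illustrative, not from the original repository): `reparameterize` is
# imported from domain_adaptation.models.common and is not shown here. A
# typical VAE reparameterization helper, assuming `sigma` holds the
# log-variance, would look like:
#
#     def reparameterize(mu, logsigma):
#         std = torch.exp(0.5 * logsigma)
#         return mu + std * torch.randn_like(std)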
| 38.475
| 99
| 0.684211
|
66e90807248e6b36d66e2c2d16d9b38671ad2c15
| 3,494
|
py
|
Python
|
monai/utils/state_cacher.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | 1
|
2022-01-04T21:38:23.000Z
|
2022-01-04T21:38:23.000Z
|
monai/utils/state_cacher.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
monai/utils/state_cacher.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
import copy
import os
import tempfile
from typing import Dict, Optional
import torch
__all__ = ["StateCacher"]
class StateCacher:
"""Class to cache and retrieve the state of an object.
Objects can either be stored in memory or on disk. If stored on disk, they can be
stored in a given directory, or alternatively a temporary location will be used.
If necessary/possible, restored objects will be returned to their original device.
Example:
>>> state_cacher = StateCacher(memory_cache, cache_dir=cache_dir)
>>> state_cacher.store("model", model.state_dict())
>>> model.load_state_dict(state_cacher.retrieve("model"))
"""
def __init__(
self,
in_memory: bool,
cache_dir: Optional[str] = None,
allow_overwrite: bool = True,
) -> None:
"""Constructor.
Args:
in_memory: boolean to determine if the object will be cached in memory or on
disk.
cache_dir: directory for data to be cached if `in_memory==False`. Defaults
to using a temporary directory. Any created files will be deleted during
the `StateCacher`'s destructor.
allow_overwrite: allow the cache to be overwritten. If set to `False`, an
                error will be thrown if a matching key already exists in the list of cached
objects.
"""
self.in_memory = in_memory
self.cache_dir = cache_dir
self.allow_overwrite = allow_overwrite
if self.cache_dir is None:
self.cache_dir = tempfile.gettempdir()
else:
if not os.path.isdir(self.cache_dir):
raise ValueError("Given `cache_dir` is not a valid directory.")
self.cached: Dict[str, str] = {}
def store(self, key, data_obj):
"""Store a given object with the given key name."""
if key in self.cached and not self.allow_overwrite:
raise RuntimeError("Cached key already exists and overwriting is disabled.")
if self.in_memory:
self.cached.update({key: {"obj": copy.deepcopy(data_obj)}})
else:
fn = os.path.join(self.cache_dir, f"state_{key}_{id(self)}.pt")
self.cached.update({key: {"obj": fn}})
torch.save(data_obj, fn)
# store object's device if relevant
if hasattr(data_obj, "device"):
self.cached[key]["device"] = data_obj.device
def retrieve(self, key):
"""Retrieve the object stored under a given key name."""
if key not in self.cached:
raise KeyError(f"Target {key} was not cached.")
if self.in_memory:
return self.cached[key]["obj"]
fn = self.cached[key]["obj"] # pytype: disable=attribute-error
if not os.path.exists(fn): # pytype: disable=wrong-arg-types
raise RuntimeError(f"Failed to load state in {fn}. File doesn't exist anymore.")
data_obj = torch.load(fn, map_location=lambda storage, location: storage)
# copy back to device if necessary
if "device" in self.cached[key]:
data_obj = data_obj.to(self.cached[key]["device"])
return data_obj
def __del__(self):
"""If necessary, delete any cached files existing in `cache_dir`."""
if not self.in_memory:
for k in self.cached:
if os.path.exists(self.cached[k]["obj"]):
os.remove(self.cached[k]["obj"])
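# Illustrative usage (a sketch, not part of MONAI itself): cache a model's
# weights on disk, overwrite them, then roll back to the cached copy.  The
# layer sizes below are arbitrary.
if __name__ == "__main__":
    import torch.nn as nn

    net = nn.Linear(4, 2)
    cacher = StateCacher(in_memory=False)  # files go to tempfile.gettempdir()
    cacher.store("best", net.state_dict())
    with torch.no_grad():
        net.weight.zero_()  # simulate a bad update
    net.load_state_dict(cacher.retrieve("best"))  # restore the cached weights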
| 37.569892
| 92
| 0.615341
|
e834925a275182f5ce1ef3fa06763e60def0f36f
| 67,256
|
py
|
Python
|
src/opserver/test/utils/analytics_fixture.py
|
safchain/contrail-controller
|
ccf736b96e4372132cb27a37d47f43373e56b320
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/test/utils/analytics_fixture.py
|
safchain/contrail-controller
|
ccf736b96e4372132cb27a37d47f43373e56b320
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/test/utils/analytics_fixture.py
|
safchain/contrail-controller
|
ccf736b96e4372132cb27a37d47f43373e56b320
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import resource
import socket
import fixtures
import subprocess
from util import retry
from mockredis import mockredis
import redis
import urllib2
import copy
import os
import json
from operator import itemgetter
from opserver_introspect_utils import VerificationOpsSrv
from collector_introspect_utils import VerificationCollector
from opserver.sandesh.viz.constants import COLLECTOR_GLOBAL_TABLE, SOURCE, MODULE
class Query(object):
table = None
start_time = None
end_time = None
select_fields = None
where = None
sort = None
sort_fields = None
limit = None
filter = None
def __init__(self, table, start_time, end_time, select_fields, where = None,
sort_fields = None, sort = None, limit = None, filter = None):
self.table = table
self.start_time = start_time
self.end_time = end_time
self.select_fields = select_fields
if where is not None:
self.where = where
if sort_fields is not None:
self.sort_fields = sort_fields
if sort is not None:
self.sort = sort
if limit is not None:
self.limit = limit
if filter is not None:
self.filter = filter
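# Illustrative only: the fixture methods below (e.g. verify_where_query_prefix)
# serialize a Query by dumping its __dict__ as JSON and posting it to the
# opserver, roughly like this (field values are placeholders):
#
#   q = Query(table="FlowSeriesTable",
#             start_time="now-10m", end_time="now",
#             select_fields=["sourcevn", "sourceip"],
#             where=[[{"name": "protocol", "value": 1, "op": 1}]])
#   json_qstr = json.dumps(q.__dict__)
#   res = vns.post_query_json(json_qstr)  # vns: a VerificationOpsSrv instance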
class Collector(object):
def __init__(self, analytics_fixture, redis_uve,
logger, is_dup=False):
self.analytics_fixture = analytics_fixture
self.listen_port = AnalyticsFixture.get_free_port()
self.http_port = AnalyticsFixture.get_free_port()
self.hostname = socket.gethostname()
self._instance = None
self._redis_uve = redis_uve
self._logger = logger
self._is_dup = is_dup
if self._is_dup is True:
self.hostname = self.hostname+'dup'
# end __init__
def get_addr(self):
return '127.0.0.1:'+str(self.listen_port)
# end get_addr
def start(self):
assert(self._instance == None)
self._log_file = '/tmp/vizd.messages.' + str(self.listen_port)
subprocess.call(['rm', '-rf', self._log_file])
args = [self.analytics_fixture.builddir + '/analytics/vizd',
'--DEFAULT.cassandra_server_list', '127.0.0.1:' +
str(self.analytics_fixture.cassandra_port),
'--REDIS.port',
str(self._redis_uve.port),
'--COLLECTOR.port', str(self.listen_port),
'--DEFAULT.http_server_port', str(self.http_port),
'--DEFAULT.log_file', self._log_file]
if self._is_dup is True:
args.append('--DEFAULT.dup')
self._instance = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn = AnalyticsFixture.enable_core)
self._logger.info('Setting up Vizd: %s' % (' '.join(args)))
# end start
def stop(self):
if self._instance is not None:
self._logger.info('Shutting down Vizd: 127.0.0.1:%d'
% (self.listen_port))
self._instance.terminate()
(vizd_out, vizd_err) = self._instance.communicate()
vcode = self._instance.returncode
if vcode != 0:
self._logger.info('vizd returned %d' % vcode)
self._logger.info('vizd terminated stdout: %s' % vizd_out)
self._logger.info('vizd terminated stderr: %s' % vizd_err)
subprocess.call(['rm', self._log_file])
assert(vcode == 0)
self._instance = None
# end stop
# end class Collector
class OpServer(object):
def __init__(self, primary_collector, secondary_collector, redis_port,
analytics_fixture, logger, is_dup=False):
self.primary_collector = primary_collector
self.secondary_collector = secondary_collector
self.analytics_fixture = analytics_fixture
self.listen_port = AnalyticsFixture.get_free_port()
self.http_port = AnalyticsFixture.get_free_port()
self._redis_port = redis_port
self._instance = None
self._logger = logger
self._is_dup = is_dup
# end __init__
def set_primary_collector(self, collector):
self.primary_collector = collector
# end set_primary_collector
def set_secondary_collector(self, collector):
self.secondary_collector = collector
# end set_secondary_collector
def start(self):
assert(self._instance == None)
self._log_file = '/tmp/opserver.messages.' + str(self.listen_port)
subprocess.call(['rm', '-rf', self._log_file])
args = ['python', self.analytics_fixture.builddir + \
'/analytics_test/bin/contrail-analytics-api',
'--redis_server_port', str(self._redis_port),
'--redis_query_port',
str(self.analytics_fixture.redis_query.port),
'--http_server_port', str(self.http_port),
'--log_file', self._log_file,
'--rest_api_port', str(self.listen_port)]
args.append('--redis_uve_list')
for redis_uve in self.analytics_fixture.redis_uves:
args.append('127.0.0.1:'+str(redis_uve.port))
args.append('--collectors')
args.append(self.primary_collector)
if self.secondary_collector is not None:
args.append(self.secondary_collector)
if self._is_dup:
args.append('--dup')
self._instance = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self._logger.info('Setting up OpServer: %s' % ' '.join(args))
# end start
def stop(self):
if self._instance is not None:
self._logger.info('Shutting down OpServer 127.0.0.1:%d'
% (self.listen_port))
self._instance.terminate()
(op_out, op_err) = self._instance.communicate()
ocode = self._instance.returncode
if ocode != 0:
self._logger.info('OpServer returned %d' % ocode)
self._logger.info('OpServer terminated stdout: %s' % op_out)
self._logger.info('OpServer terminated stderr: %s' % op_err)
subprocess.call(['rm', self._log_file])
self._instance = None
# end stop
def send_tracebuffer_request(self, src, mod, instance, tracebuf):
vops = VerificationOpsSrv('127.0.0.1', self.listen_port)
res = vops.send_tracebuffer_req(src, mod, instance, tracebuf)
self._logger.info('send_tracebuffer_request: %s' % (str(res)))
assert(res['status'] == 'pass')
# end send_tracebuffer_request
# end class OpServer
class QueryEngine(object):
def __init__(self, primary_collector, secondary_collector,
analytics_fixture, logger):
self.primary_collector = primary_collector
self.secondary_collector = secondary_collector
self.analytics_fixture = analytics_fixture
self.listen_port = AnalyticsFixture.get_free_port()
self.http_port = AnalyticsFixture.get_free_port()
self._instance = None
self._logger = logger
# end __init__
def set_primary_collector(self, collector):
self.primary_collector = collector
# end set_primary_collector
def set_secondary_collector(self, collector):
self.secondary_collector = collector
# end set_secondary_collector
def start(self, analytics_start_time=None):
assert(self._instance == None)
self._log_file = '/tmp/qed.messages.' + str(self.listen_port)
subprocess.call(['rm', '-rf', self._log_file])
args = [self.analytics_fixture.builddir + '/query_engine/qedt',
'--REDIS.port', str(self.analytics_fixture.redis_query.port),
'--DEFAULT.cassandra_server_list', '127.0.0.1:' +
str(self.analytics_fixture.cassandra_port),
'--DEFAULT.http_server_port', str(self.listen_port),
'--DEFAULT.log_local', '--DEFAULT.log_level', 'SYS_DEBUG',
'--DEFAULT.log_file', self._log_file,
'--DEFAULT.collectors', self.primary_collector]
if self.secondary_collector is not None:
args.append('--DEFAULT.collectors')
args.append(self.secondary_collector)
if analytics_start_time is not None:
args += ['--DEFAULT.start_time', str(analytics_start_time)]
self._instance = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn = AnalyticsFixture.enable_core)
self._logger.info('Setting up QueryEngine: %s' % ' '.join(args))
# end start
def stop(self):
if self._instance is not None:
self._logger.info('Shutting down QueryEngine: 127.0.0.1:%d'
% (self.listen_port))
self._instance.terminate()
(qe_out, qe_err) = self._instance.communicate()
rcode = self._instance.returncode
if rcode != 0:
self._logger.info('QueryEngine returned %d' % rcode)
self._logger.info('QueryEngine terminated stdout: %s' % qe_out)
self._logger.info('QueryEngine terminated stderr: %s' % qe_err)
subprocess.call(['rm', self._log_file])
assert(rcode == 0)
self._instance = None
# end stop
# end class QueryEngine
class Redis(object):
def __init__(self,builddir):
self.builddir = builddir
self.port = AnalyticsFixture.get_free_port()
self.running = False
# end __init__
def start(self):
assert(self.running == False)
self.running = True
mockredis.start_redis(self.port,self.builddir+'/testroot/bin/redis-server')
# end start
def stop(self):
if self.running:
mockredis.stop_redis(self.port)
self.running = False
#end stop
# end class Redis
class AnalyticsFixture(fixtures.Fixture):
def __init__(self, logger, builddir, cassandra_port,
noqed=False, collector_ha_test=False):
self.builddir = builddir
self.cassandra_port = cassandra_port
self.logger = logger
self.noqed = noqed
self.collector_ha_test = collector_ha_test
def setUp(self):
super(AnalyticsFixture, self).setUp()
self.redis_uves = [Redis(self.builddir)]
self.redis_uves[0].start()
self.redis_query = Redis(self.builddir)
self.redis_query.start()
self.collectors = [Collector(self, self.redis_uves[0], self.logger)]
self.collectors[0].start()
self.opserver_port = None
if self.verify_collector_gen(self.collectors[0]):
primary_collector = self.collectors[0].get_addr()
secondary_collector = None
if self.collector_ha_test:
self.redis_uves.append(Redis(self.builddir))
self.redis_uves[1].start()
self.collectors.append(Collector(self, self.redis_uves[1],
self.logger, True))
self.collectors[1].start()
secondary_collector = self.collectors[1].get_addr()
self.opserver = OpServer(primary_collector, secondary_collector,
self.redis_uves[0].port,
self, self.logger)
self.opserver.start()
self.opserver_port = self.opserver.listen_port
self.query_engine = QueryEngine(primary_collector,
secondary_collector,
self, self.logger)
if not self.noqed:
self.query_engine.start()
# end setUp
def get_collector(self):
return '127.0.0.1:'+str(self.collectors[0].listen_port)
# end get_collector
def get_collectors(self):
return ['127.0.0.1:'+str(self.collectors[0].listen_port),
'127.0.0.1:'+str(self.collectors[1].listen_port)]
# end get_collectors
def get_opserver_port(self):
return self.opserver.listen_port
# end get_opserver_port
def verify_on_setup(self):
result = True
if self.opserver_port is None:
result = result and False
self.logger.error("Collector UVE not in Redis")
if self.opserver_port is None:
result = result and False
self.logger.error("OpServer not started")
if not self.verify_opserver_api():
result = result and False
self.logger.error("OpServer not responding")
self.verify_is_run = True
return result
@retry(delay=2, tries=20)
def verify_collector_gen(self, collector):
'''
See if the SandeshClient within vizd has been able to register
with the collector within vizd
'''
vcl = VerificationCollector('127.0.0.1', collector.http_port)
try:
genlist = vcl.get_generators()['generators']
src = genlist[0]['source']
except:
return False
self.logger.info("Src Name is %s" % src)
if src == socket.gethostname():
return True
else:
return False
@retry(delay=1, tries=10)
def verify_opserver_api(self):
'''
Verify that the opserver is accepting client requests
'''
data = {}
url = 'http://127.0.0.1:' + str(self.opserver_port) + '/'
try:
data = urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
self.logger.info("HTTP error: %d" % e.code)
except urllib2.URLError, e:
self.logger.info("Network error: %s" % e.reason.args[1])
self.logger.info("Checking OpServer %s" % str(data))
if data == {}:
return False
else:
return True
@retry(delay=2, tries=10)
def verify_collector_obj_count(self):
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
res = vns.post_query('ObjectCollectorInfo',
start_time='-10m', end_time='now',
select_fields=["ObjectLog"],
where_clause=str(
'ObjectId=' + socket.gethostname()),
sync=False)
if res == []:
return False
else:
assert(len(res) > 0)
self.logger.info(str(res))
return True
@retry(delay=1, tries=10)
def verify_generator_list(self, collector, exp_genlist):
vcl = VerificationCollector('127.0.0.1', collector.http_port)
try:
genlist = vcl.get_generators()['generators']
self.logger.info('generator list: ' + str(genlist))
self.logger.info('exp generator list: ' + str(exp_genlist))
if len(genlist) != len(exp_genlist):
return False
for mod in exp_genlist:
gen_found = False
for gen in genlist:
if mod == gen['module_id']:
gen_found = True
if gen['state'] != 'Established':
return False
break
if gen_found is not True:
return False
except Exception as err:
self.logger.error('Exception: %s' % err)
return False
return True
@retry(delay=1, tries=10)
def verify_generator_uve_list(self, exp_gen_list):
self.logger.info('verify_generator_uve_list')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
# get generator list
gen_list = vns.uve_query('generators?cfilt=ModuleClientState:client_info')
try:
actual_gen_list = [gen['name'] for gen in gen_list]
self.logger.info('generators: %s' % str(actual_gen_list))
for gen in exp_gen_list:
if gen not in actual_gen_list:
return False
except Exception as e:
self.logger.error('Exception: %s' % e)
return True
# end verify_generator_uve_list
@retry(delay=1, tries=6)
def verify_message_table_messagetype(self):
self.logger.info("verify_message_table_messagetype")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
# query for CollectorInfo logs
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause="Messagetype = CollectorInfo")
if (res == []):
return False
assert(len(res) > 0)
# verify if the result returned is ok
moduleids = list(set(x['ModuleId'] for x in res))
self.logger.info("Modules: %s " % str(moduleids))
# only one moduleid: Collector
if (not((len(moduleids) == 1))):
return False
if (not ("Collector" in moduleids)):
return False
return True
@retry(delay=1, tries=6)
def verify_message_table_select_uint_type(self):
self.logger.info("verify_message_table_select_uint_type")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
# query for CollectorInfo logs
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["Level", "Type", "MessageTS", "SequenceNum"],
where_clause='')
if (res == []):
return False
else:
for x in res:
assert('Level' in x)
assert('Type' in x)
assert('MessageTS' in x)
assert('SequenceNum' in x)
assert(type(x['Level']) is int)
assert(type(x['Type']) is int)
assert(type(x['MessageTS']) is int)
assert(type(x['SequenceNum']) is int)
return True
@retry(delay=1, tries=6)
def verify_message_table_moduleid(self):
self.logger.info("verify_message_table_moduleid")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
# query for QueryEngine logs
res_qe = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["Type", "Messagetype"],
where_clause="ModuleId = QueryEngine")
# query for Collector logs
res_c = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["Type", "Messagetype"],
where_clause="ModuleId = Collector")
if (res_qe == []) or (res_c == []):
return False
assert(len(res_qe) > 0)
assert(len(res_c) > 0)
return True
@retry(delay=1, tries=6)
def verify_message_table_where_or(self):
self.logger.info("verify_message_table_where_or")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
where_clause1 = "ModuleId = QueryEngine"
where_clause2 = str("Source =" + socket.gethostname())
res = vns.post_query(
'MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(where_clause1 + " OR " + where_clause2))
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = list(set(x['ModuleId'] for x in res))
self.logger.info(str(moduleids))
if ('Collector' in moduleids) and ('QueryEngine' in moduleids):
return True
else:
return False
@retry(delay=1, tries=6)
def verify_message_table_where_and(self):
self.logger.info("verify_message_table_where_and")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
where_clause1 = "ModuleId = QueryEngine"
where_clause2 = str("Source =" + socket.gethostname())
res = vns.post_query(
'MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(where_clause1 + " AND " + where_clause2))
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = list(set(x['ModuleId'] for x in res))
self.logger.info(str(moduleids))
if len(moduleids) == 1: # 1 moduleid: QueryEngine
return True
else:
return False
@retry(delay=1, tries=6)
def verify_message_table_filter(self):
self.logger.info("verify_message_table_where_filter")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
where_clause1 = "ModuleId = QueryEngine"
where_clause2 = str("Source =" + socket.gethostname())
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(
where_clause1 + " OR " + where_clause2),
filter="ModuleId = QueryEngine")
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = list(set(x['ModuleId'] for x in res))
self.logger.info(str(moduleids))
            if len(moduleids) != 1: # 1 moduleid: QueryEngine
return False
res1 = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(
where_clause1 + " AND " + where_clause2),
filter="ModuleId = Collector")
self.logger.info(str(res1))
if res1 != []:
return False
return True
@retry(delay=1, tries=1)
def verify_message_table_sort(self):
self.logger.info("verify_message_table_sort:Ascending Sort")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
where_clause1 = "ModuleId = QueryEngine"
where_clause2 = str("Source =" + socket.gethostname())
exp_moduleids = ['Collector', 'OpServer', 'QueryEngine']
# Ascending sort
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(
where_clause1 + " OR " + where_clause2),
sort_fields=["ModuleId"], sort=1)
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = []
for x in res:
if x['ModuleId'] not in moduleids:
moduleids.append(x['ModuleId'])
self.logger.info(str(moduleids))
for module in exp_moduleids:
if module not in moduleids:
return False
expected_res = sorted(res, key=itemgetter('ModuleId'))
if res != expected_res:
return False
# Descending sort
self.logger.info("verify_message_table_sort:Descending Sort")
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(
where_clause1 + " OR " + where_clause2),
sort_fields=["ModuleId"], sort=2)
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = []
for x in res:
if x['ModuleId'] not in moduleids:
moduleids.append(x['ModuleId'])
self.logger.info(str(moduleids))
for module in exp_moduleids:
if module not in moduleids:
return False
expected_res = sorted(
res, key=itemgetter('ModuleId'), reverse=True)
if res != expected_res:
return False
# Limit
res = vns.post_query('MessageTable',
start_time='-10m', end_time='now',
select_fields=["ModuleId"],
where_clause=str(
where_clause1 + " OR " + where_clause2),
sort_fields=["ModuleId"], sort=1, limit=1)
if res == []:
return False
else:
assert(len(res) > 0)
moduleids = []
for x in res:
if x['ModuleId'] not in moduleids:
moduleids.append(x['ModuleId'])
self.logger.info(str(moduleids))
            if len(moduleids) == 1: # limit=1, so only the first moduleid (Collector) is expected
if moduleids[0] != 'Collector':
return False
return True
else:
return False
@retry(delay=1, tries=8)
def verify_intervn_all(self, gen_obj):
self.logger.info("verify_intervn_all")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
res = vns.post_query('StatTable.UveVirtualNetworkAgent.vn_stats',
start_time='-10m',
end_time='now',
select_fields=['T', 'name', 'UUID','vn_stats.other_vn', 'vn_stats.vrouter', 'vn_stats.in_tpkts'],
where_clause=gen_obj.vn_all_rows['whereclause'])
self.logger.info(str(res))
if len(res) == gen_obj.vn_all_rows['rows']:
return True
return False
@retry(delay=1, tries=8)
def verify_intervn_sum(self, gen_obj):
self.logger.info("verify_intervn_sum")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
res = vns.post_query('StatTable.UveVirtualNetworkAgent.vn_stats',
start_time='-10m',
end_time='now',
select_fields=gen_obj.vn_sum_rows['select'],
where_clause=gen_obj.vn_sum_rows['whereclause'])
self.logger.info(str(res))
if len(res) == gen_obj.vn_sum_rows['rows']:
return True
return False
@retry(delay=1, tries=10)
def verify_flow_samples(self, generator_obj):
self.logger.info("verify_flow_samples")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
vrouter = generator_obj._hostname
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['T'], dir=1, where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
if len(res) != generator_obj.num_flow_samples:
return False
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
result = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.egress_flow_start_time),
end_time=str(generator_obj.egress_flow_end_time),
select_fields=['T'], dir=0, where_clause='vrouter=%s'% vrouter)
self.logger.info(str(result))
if len(result) != generator_obj.egress_num_flow_samples:
return False
return True
# end verify_flow_samples
def verify_where_query_prefix(self,generator_obj):
self.logger.info('verify where query in FlowSeriesTable')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
vrouter = generator_obj._hostname
a_query = Query(table="FlowSeriesTable",
start_time=(generator_obj.flow_start_time),
end_time=(generator_obj.flow_end_time),
select_fields=["sourcevn","sourceip","vrouter"],
where=[[{"name":"sourcevn","value":"domain1:admin","op":7},
{"name":"destvn","value":"domain1:admin","op":7},
{"name":"vrouter","value":"%s"%vrouter,"op":1}]])
json_qstr = json.dumps(a_query.__dict__)
res = vns.post_query_json(json_qstr)
assert(len(res)>0)
a_query = Query(table="FlowSeriesTable",
start_time=(generator_obj.flow_start_time),
end_time=(generator_obj.flow_end_time),
select_fields=["sourcevn","sourceip","vrouter"],
where=[[{"name":"protocol","value":1,"op":1}]])
json_qstr = json.dumps(a_query.__dict__)
res = vns.post_query_json(json_qstr)
assert(len(res)>0)
return True
def verify_flow_table(self, generator_obj):
vrouter = generator_obj._hostname
# query flow records
self.logger.info('verify_flow_table')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=[
'UuidKey', 'agg-packets', 'agg-bytes'],
where_clause='vrouter=%s'% vrouter)
self.logger.info("FlowRecordTable result:%s" % str(res))
assert(len(res) == generator_obj.flow_cnt)
# query based on various WHERE parameters
# sourcevn and sourceip
res = vns.post_query(
'FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'sourcevn', 'sourceip'],
where_clause='sourceip=10.10.10.1 AND sourcevn=domain1:admin:vn1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.flow_cnt)
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sourcevn', 'sourceip'],
where_clause='sourceip=10.10.10.1 AND sourcevn=domain1:admin:vn1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
# give non-existent values in the where clause
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'sourcevn', 'sourceip'],
where_clause='sourceip=20.1.1.10 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sourcevn', 'sourceip'],
where_clause='sourceip=20.1.1.10 AND sourcevn=domain1:admin:vn1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
# destvn and destip
res = vns.post_query(
'FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'destvn', 'destip'],
where_clause='destip=10.10.10.2 AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.flow_cnt)
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['destvn', 'destip'],
where_clause='destip=10.10.10.2 AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
# give non-existent values in the where clause
res = vns.post_query(
'FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'destvn', 'destip'],
where_clause='destip=10.10.10.2 AND ' +
            'destvn=default-domain:default-project:default-virtual-network AND ' +
'vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['destvn', 'destip'],
where_clause='destip=20.1.1.10 AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
# sport and protocol
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'sport', 'protocol'],
where_clause='sport=13 AND protocol=1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 1)
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sport', 'protocol'],
where_clause='sport=13 AND protocol=1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 5)
        # give non-existent values in the where clause
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'sport', 'protocol'],
where_clause='sport=20 AND protocol=17 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sport', 'protocol'],
where_clause='sport=20 AND protocol=1 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
# dport and protocol
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'dport', 'protocol'],
where_clause='dport=104 AND protocol=2 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 1)
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['dport', 'protocol'],
where_clause='dport=104 AND protocol=2 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 5)
        # give non-existent values in the where clause
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'dport', 'protocol'],
where_clause='dport=10 AND protocol=17 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['dport', 'protocol'],
where_clause='dport=10 AND protocol=17 AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 0)
# sort and limit
res = vns.post_query(
'FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['UuidKey', 'protocol'], where_clause='vrouter=%s'% vrouter,
sort_fields=['protocol'], sort=1)
self.logger.info(str(res))
assert(len(res) == generator_obj.flow_cnt)
assert(res[0]['protocol'] == 0)
res = vns.post_query('FlowRecordTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['protocol'], where_clause='vrouter=%s'% vrouter,
sort_fields=['protocol'], sort=2, limit=1)
self.logger.info(str(res))
assert(len(res) == 1)
assert(res[0]['protocol'] == 2)
return True
# end verify_flow_table
def verify_flow_series_aggregation_binning(self, generator_object):
generator_obj = generator_object[0]
vrouter = generator_obj._hostname
self.logger.info('verify_flow_series_aggregation_binning')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
# Helper function for stats aggregation
def _aggregate_stats(flow, start_time, end_time):
stats = {'sum_bytes':0, 'sum_pkts':0}
for f in flow.samples:
if f._timestamp < start_time:
continue
elif f._timestamp > end_time:
break
stats['sum_bytes'] += f.flowdata.diff_bytes
stats['sum_pkts'] += f.flowdata.diff_packets
return stats
def _aggregate_flows_stats(flows, start_time, end_time):
stats = {'sum_bytes':0, 'sum_pkts':0}
for f in flows:
s = _aggregate_stats(f, start_time, end_time)
stats['sum_bytes'] += s['sum_bytes']
stats['sum_pkts'] += s['sum_pkts']
return stats
# 1. stats
self.logger.info('Flowseries: [sum(bytes), sum(packets), flow_count]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sum(bytes)', 'sum(packets)', 'flow_count'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 1)
exp_sum_pkts = exp_sum_bytes = 0
for f in generator_obj.flows:
exp_sum_pkts += f.packets
exp_sum_bytes += f.bytes
assert(res[0]['sum(packets)'] == exp_sum_pkts)
assert(res[0]['sum(bytes)'] == exp_sum_bytes)
assert(res[0]['flow_count'] == generator_obj.flow_cnt)
# 2. flow tuple + stats
self.logger.info(
'Flowseries: [sport, dport, sum(bytes), sum(packets), flow_count]')
# Each flow has unique (sport, dport). Therefore, the following query
# should return # records equal to the # flows.
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sport', 'dport', 'sum(bytes)',
'sum(packets)', 'flow_count'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.flow_cnt)
for r in res:
cnt = 0
for f in generator_obj.flows:
if r['sport'] == f.sport and r['dport'] == f.dport:
assert(r['sum(packets)'] == f.packets)
assert(r['sum(bytes)'] == f.bytes)
assert(r['flow_count'] == 1)
break
cnt += 1
assert(cnt < generator_obj.flow_cnt)
        # All flows have the same (sourcevn, destvn). Therefore, the following
# query should return one record.
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sourcevn', 'destvn', 'sum(bytes)',
'sum(packets)', 'flow_count'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 1)
exp_sum_pkts = exp_sum_bytes = 0
for f in generator_obj.flows:
exp_sum_pkts += f.packets
exp_sum_bytes += f.bytes
assert(res[0]['sum(packets)'] == exp_sum_pkts)
assert(res[0]['sum(bytes)'] == exp_sum_bytes)
assert(res[0]['flow_count'] == generator_obj.flow_cnt)
# top 3 flows
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['sport', 'dport', 'sum(bytes)'],
where_clause='vrouter=%s'% vrouter,
sort_fields=['sum(bytes)'], sort=2, limit=3)
self.logger.info(str(res))
assert(len(res) == 3)
exp_res = sorted(
generator_obj.flows, key=lambda flow: flow.bytes, reverse=True)
cnt = 0
for r in res:
assert(r['sport'] == exp_res[cnt].sport)
assert(r['dport'] == exp_res[cnt].dport)
assert(r['sum(bytes)'] == exp_res[cnt].bytes)
cnt += 1
# 3. T=<granularity> + stats
self.logger.info('Flowseries: [T=<x>, sum(bytes), sum(packets)]')
st = str(generator_obj.flow_start_time)
et = str(generator_obj.flow_start_time + (30 * 1000 * 1000))
granularity = 10
gms = granularity * 1000 * 1000 # in micro seconds
res = vns.post_query(
'FlowSeriesTable', start_time=st, end_time=et,
select_fields=['T=%s' % (granularity), 'sum(bytes)',
'sum(packets)'],
where_clause='sourcevn=domain1:admin:vn1 ' +
'AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
diff_t = int(et) - int(st)
num_records = (diff_t/gms) + bool(diff_t%gms)
assert(len(res) == num_records)
ts = [generator_obj.flow_start_time + (x * gms) \
for x in range(num_records)]
exp_result = {}
for t in ts:
end_time = t + gms
if end_time > int(et):
end_time = int(et)
ts_stats = _aggregate_flows_stats(generator_obj.flows,
t, end_time)
exp_result[t] = {'sum(bytes)':ts_stats['sum_bytes'],
'sum(packets)':ts_stats['sum_pkts']}
self.logger.info('exp_result: %s' % str(exp_result))
self.logger.info('res: %s' % str(res))
assert(len(exp_result) == num_records)
for r in res:
try:
stats = exp_result[r['T']]
except KeyError:
assert(False)
assert(r['sum(bytes)'] == stats['sum(bytes)'])
assert(r['sum(packets)'] == stats['sum(packets)'])
# 4. T=<granularity> + tuples + stats
self.logger.info(
'Flowseries: [T=<x>, protocol, sum(bytes), sum(packets)]')
st = str(generator_obj.flow_start_time)
et = str(generator_obj.flow_start_time + (10 * 1000 * 1000))
granularity = 5
gms = 5 * 1000 * 1000
res = vns.post_query(
'FlowSeriesTable', start_time=st, end_time=et,
select_fields=['T=%s' % (granularity), 'protocol', 'sum(bytes)',
'sum(packets)'],
where_clause='sourcevn=domain1:admin:vn1 ' +
'AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
diff_t = int(et) - int(st)
num_ts = (diff_t/gms) + bool(diff_t%gms)
ts = [generator_obj.flow_start_time + (x * gms) \
for x in range(num_ts)]
proto_flows = [
[generator_obj.flows[0], generator_obj.flows[1]],
[generator_obj.flows[2], generator_obj.flows[3]],
[generator_obj.flows[4]]
]
proto_ts = [ts, ts, [ts[0]]]
exp_result = {}
for i in range(0, len(proto_flows)):
ts_stats = {}
for ts in proto_ts[i]:
end_time = ts + gms
if end_time > int(et): end_time = int(et)
stats = _aggregate_flows_stats(proto_flows[i], ts, end_time)
ts_stats[ts] = {'sum(bytes)':stats['sum_bytes'],
'sum(packets)':stats['sum_pkts']}
exp_result[i] = ts_stats
self.logger.info('exp_result: %s' % str(exp_result))
self.logger.info('res: %s' % str(res))
assert(len(res) == 5)
for r in res:
try:
stats = exp_result[r['protocol']][r['T']]
except KeyError:
assert(False)
assert(r['sum(bytes)'] == stats['sum(bytes)'])
assert(r['sum(packets)'] == stats['sum(packets)'])
# 5. T=<granularity> + stats, granularity > (end_time - start_time)
self.logger.info('Flowseries: [T=<x>, sum(bytes), sum(packets)], '
'x > (end_time - start_time)')
st = str(generator_obj.flow_start_time)
et = str(generator_obj.flow_end_time)
granularity = 70
gms = granularity * 1000 * 1000 # in micro seconds
assert(gms > (int(et) - int(st)))
res = vns.post_query(
'FlowSeriesTable', start_time=st, end_time=et,
select_fields=['T=%s' % (granularity), 'sum(bytes)',
'sum(packets)'],
where_clause='vrouter=%s'% vrouter)
ts_stats = _aggregate_flows_stats(generator_obj.flows,
int(st), int(et))
exp_result = {int(st):{'sum(bytes)':ts_stats['sum_bytes'],
'sum(packets)':ts_stats['sum_pkts']}}
self.logger.info('exp_result: %s' % str(exp_result))
self.logger.info('res: %s' % str(res))
assert(len(res) == 1)
for r in res:
try:
stats = exp_result[r['T']]
except KeyError:
assert(False)
assert(r['sum(bytes)'] == stats['sum(bytes)'])
assert(r['sum(packets)'] == stats['sum(packets)'])
# 6. direction_ing + stats
self.logger.info('Flowseries: [direction_ing, sum(bytes), sum(packets), flow_count]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['direction_ing', 'sum(bytes)', 'sum(packets)', 'flow_count'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == 1)
exp_sum_pkts = exp_sum_bytes = 0
for f in generator_obj.flows:
exp_sum_pkts += f.packets
exp_sum_bytes += f.bytes
direction_ing = generator_obj.flows[0].direction_ing
assert(res[0]['sum(packets)'] == exp_sum_pkts)
assert(res[0]['sum(bytes)'] == exp_sum_bytes)
assert(res[0]['flow_count'] == generator_obj.flow_cnt)
assert(res[0]['direction_ing'] == direction_ing)
self.logger.info('Flowseries: [direction_ing, sum(bytes), sum(packets), flow_count]')
result = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.egress_flow_start_time),
end_time=str(generator_obj.egress_flow_end_time),
select_fields=['direction_ing', 'sum(bytes)', 'sum(packets)', 'flow_count'],
where_clause='vrouter=%s'% vrouter, dir=0)
self.logger.info(str(result))
assert(len(result) == 1)
exp_sum_pkts = exp_sum_bytes = 0
for f in generator_obj.egress_flows:
exp_sum_pkts += f.packets
exp_sum_bytes += f.bytes
direction_ing = generator_obj.egress_flows[0].direction_ing
assert(result[0]['sum(packets)'] == exp_sum_pkts)
assert(result[0]['sum(bytes)'] == exp_sum_bytes)
assert(result[0]['flow_count'] == generator_obj.flow_cnt)
assert(result[0]['direction_ing'] == direction_ing)
# 7. T=<granularity> + tuples
self.logger.info(
'Flowseries: [T=<x>, sourcevn, destvn, sport, dport, protocol]')
st = str(generator_obj.flow_start_time)
et = str(generator_obj.flow_start_time + (10 * 1000 * 1000))
granularity = 5
gms = 5 * 1000 * 1000
res = vns.post_query(
'FlowSeriesTable', start_time=st, end_time=et,
select_fields=['T=%s' % (granularity), 'protocol', 'sourcevn', 'destvn',
'sport', 'dport'],
            where_clause='sourcevn=domain1:admin:vn1 ' +
'AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
diff_t = int(et) - int(st)
num_ts = (diff_t/gms) + bool(diff_t%gms)
ts = [generator_obj.flow_start_time + (x * gms) \
for x in range(num_ts)]
exp_result = {}
exp_result_cnt=0
for i in generator_obj.flows:
exp_result[exp_result_cnt] = {'T':ts[0], 'sourcevn':i.sourcevn,
'destvn':i.destvn, 'sport':i.sport,
'dport':i.dport, 'protocol':i.protocol,}
exp_result_cnt +=1
records = generator_obj.flow_cnt-1
for i in range(0,records):
exp_result[exp_result_cnt] = {'T':ts[1], 'sourcevn':generator_obj.flows[i].sourcevn,
'destvn':generator_obj.flows[i].destvn,
'sport':generator_obj.flows[i].sport,
'dport':generator_obj.flows[i].dport,
'protocol':generator_obj.flows[i].protocol,}
exp_result_cnt +=1
assert(exp_result_cnt == len(res))
count = 0
for r in res:
assert(r['T'] == exp_result[count]['T'])
assert(r['sourcevn'] == exp_result[count]['sourcevn'])
assert(r['destvn'] == exp_result[count]['destvn'])
assert(r['sport'] == exp_result[count]['sport'])
assert(r['dport'] == exp_result[count]['dport'])
assert(r['protocol'] == exp_result[count]['protocol'])
count +=1
# 8. Timestamp + stats
self.logger.info('Flowseries: [T, bytes, packets]')
# testing for flows at index 1 in generator_obj.flows
flow = generator_obj.flows[1]
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['T', 'bytes', 'packets'],
            where_clause='sourcevn=%s ' %(flow.sourcevn) +
            'AND destvn=%s AND sport= %d ' %(flow.destvn, flow.sport) +
            'AND dport=%d AND protocol=%d ' %(flow.dport, flow.protocol) +
            'AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == len(flow.samples))
for f in flow.samples:
found = 0
for r in res:
if r['T'] == f._timestamp:
assert(r['packets'] == f.flowdata.diff_packets)
assert(r['bytes'] == f.flowdata.diff_bytes)
found = 1
break
assert(found)
# 9. Raw bytes and packets
self.logger.info('Flowseries: [bytes, packets]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['bytes', 'packets'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
sorted_res = sorted(res, key=itemgetter('packets', 'bytes'))
flow = []
for f in generator_obj.flows:
for s in f.samples:
flow.append({'packets':s.flowdata.diff_packets,
'bytes':s.flowdata.diff_bytes})
sorted_flow = sorted(flow, key=itemgetter('packets', 'bytes'))
assert(sorted_res == sorted_flow)
# 10. Timestamp
self.logger.info('Flowseries: [T]')
# testing for flows at index 1 in generator_obj.flows
flow = generator_obj.flows[1]
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['T'],
            where_clause='sourcevn=%s ' %(flow.sourcevn) +
            'AND destvn=%s AND sport= %d ' %(flow.destvn, flow.sport) +
            'AND dport=%d AND protocol=%d ' %(flow.dport, flow.protocol) +
            'AND vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == len(flow.samples))
sorted_res = sorted(res, key=itemgetter('T'))
cnt = 0
for f in flow.samples:
assert(sorted_res[cnt]['T'] == f._timestamp)
cnt+= 1
# 11. T=<granularity>
self.logger.info('Flowseries: [T=<x>]')
st = str(generator_obj.flow_start_time)
et = str(generator_obj.flow_start_time + (10 * 1000 * 1000))
granularity = 5
gms = 5 * 1000 * 1000
res = vns.post_query(
'FlowSeriesTable', start_time=st, end_time=et,
select_fields=['T=%s' % (granularity)],
            where_clause='sourcevn=domain1:admin:vn1 ' +
'AND destvn=domain1:admin:vn2 AND vrouter=%s'% vrouter)
diff_t = int(et) - int(st)
num_ts = (diff_t/gms) + bool(diff_t%gms)
ts = []
for x in range(num_ts):
ts.append({'T':generator_obj.flow_start_time + (x * gms)})
self.logger.info(str(res))
assert(res == ts)
# 12. Flow tuple
self.logger.info('Flowseries: [protocol, sport, dport]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['protocol', 'sport', 'dport'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
for flow in generator_obj.flows:
found = 0
for r in res:
if flow.sport == r['sport']:
assert(r['dport'] == flow.dport)
assert(r['protocol'] == flow.protocol)
found = 1
assert(found)
# 13. T + flow tuple
self.logger.info('Flowseries: [T, protocol, sport, dport]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['T', 'protocol', 'sport', 'dport'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
for flow in generator_obj.flows:
sport = flow.sport
for sample in flow.samples:
found = 0
for r in res:
if r['T'] == sample._timestamp and r['sport'] == sport:
assert(r['protocol'] == flow.protocol)
assert(r['dport'] == flow.dport)
found = 1
break
assert(found)
# 14. T + flow tuple + stats
self.logger.info('Flowseries: [T, protocol, sport, dport, bytes, packets]')
res = vns.post_query(
'FlowSeriesTable',
start_time=str(generator_obj.flow_start_time),
end_time=str(generator_obj.flow_end_time),
select_fields=['T', 'protocol', 'sport', 'dport', 'bytes', 'packets'],
where_clause='vrouter=%s'% vrouter)
self.logger.info(str(res))
assert(len(res) == generator_obj.num_flow_samples)
for flow in generator_obj.flows:
sport = flow.sport
for sample in flow.samples:
found = 0
for r in res:
if r['T'] == sample._timestamp and r['sport'] == sport:
assert(r['protocol'] == flow.protocol)
assert(r['dport'] == flow.dport)
assert(r['bytes'] == sample.flowdata.diff_bytes)
assert(r['packets'] == sample.flowdata.diff_packets)
found = 1
break
assert(found)
# 15 vrouter
self.logger.info("Flowseries: [sourcevn, destvn, vrouter]")
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
generator_obj1 = generator_object[1]
res = vns.post_query('FlowSeriesTable',
start_time=str(generator_obj1.flow_start_time),
end_time=str(generator_obj1.flow_end_time),
select_fields=['sourcevn', 'destvn', 'vrouter'], dir=1, where_clause='')
self.logger.info(str(res))
assert(len(res) == (generator_obj1.num_flow_samples + generator_obj.num_flow_samples))
sorted_res = sorted(res, key=itemgetter('vrouter'))
exp_result = []
for flow in generator_obj1.flows:
for f in flow.samples:
dict = {'vrouter':f._source, 'destvn':f.flowdata.destvn, 'sourcevn':f.flowdata.sourcevn}
exp_result.append(dict)
for flow in generator_obj.flows:
for f in flow.samples:
dict = {'vrouter':f._source, 'destvn':f.flowdata.destvn, 'sourcevn':f.flowdata.sourcevn}
exp_result.append(dict)
sorted_exp_result = sorted(exp_result, key=itemgetter('vrouter'))
assert(sorted_res == sorted_exp_result)
return True
# end verify_flow_series_aggregation_binning
def verify_fieldname_messagetype(self):
self.logger.info('Verify stats table for stats name field');
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port);
query = Query(table="StatTable.FieldNames.fields",
start_time="now-10m",
end_time="now",
select_fields=["fields.value"],
where=[[{"name": "name", "value": "Message", "op": 7}]])
json_qstr = json.dumps(query.__dict__)
res = vns.post_query_json(json_qstr)
self.logger.info(str(res))
assert(len(res)>1)
return True
def verify_fieldname_objecttype(self):
        self.logger.info('Verify ObjectCollectorInfo query for the ObjectId field')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port);
query = Query(table="ObjectCollectorInfo",
start_time="now-600s",
end_time="now",
select_fields=["ObjectId"]);
json_qstr = json.dumps(query.__dict__)
res = vns.post_query_json(json_qstr)
self.logger.info(str(res))
assert(len(res) > 0)
return True
@retry(delay=2, tries=5)
def verify_collector_redis_uve_connection(self, collector, connected=True):
self.logger.info('verify_collector_redis_uve_connection')
vcl = VerificationCollector('127.0.0.1', collector.http_port)
try:
redis_uve = vcl.get_redis_uve_info()['RedisUveInfo']
if redis_uve['status'] == 'Connected':
return connected
except Exception as err:
self.logger.error('Exception: %s' % err)
return not connected
# end verify_collector_redis_uve_connection
@retry(delay=2, tries=5)
def verify_opserver_redis_uve_connection(self, opserver, connected=True):
self.logger.info('verify_opserver_redis_uve_connection')
vops = VerificationOpsSrv('127.0.0.1', opserver.http_port)
try:
redis_uve = vops.get_redis_uve_info()['RedisUveInfo']
if redis_uve['status'] == 'Connected':
return connected
except Exception as err:
self.logger.error('Exception: %s' % err)
return not connected
# end verify_opserver_redis_uve_connection
@retry(delay=2, tries=5)
def verify_tracebuffer_in_analytics_db(self, src, mod, tracebuf):
self.logger.info('verify trace buffer data in analytics db')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
where_clause = []
where_clause.append('Source = ' + src)
where_clause.append('ModuleId = ' + mod)
where_clause.append('Category = ' + tracebuf)
where_clause = ' AND '.join(where_clause)
res = vns.post_query('MessageTable', start_time='-3m', end_time='now',
select_fields=['MessageTS', 'Messagetype'],
where_clause=where_clause, filter='Type=4')
if not res:
return False
self.logger.info(str(res))
return True
# end verify_tracebuffer_in_analytics_db
@retry(delay=1, tries=5)
def verify_table_source_module_list(self, exp_src_list, exp_mod_list):
self.logger.info('verify source/module list')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
try:
src_list = vns.get_table_column_values(COLLECTOR_GLOBAL_TABLE,
SOURCE)
self.logger.info('src_list: %s' % str(src_list))
if len(set(src_list).intersection(exp_src_list)) != \
len(exp_src_list):
return False
mod_list = vns.get_table_column_values(COLLECTOR_GLOBAL_TABLE,
MODULE)
self.logger.info('mod_list: %s' % str(mod_list))
if len(set(mod_list).intersection(exp_mod_list)) != \
len(exp_mod_list):
return False
except Exception as e:
self.logger.error('Exception: %s in getting source/module list' % e)
else:
return True
# end verify_table_source_module_list
@retry(delay=1, tries=5)
def verify_where_query(self):
self.logger.info('Verify where query with int type works');
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port);
query = Query(table="StatTable.QueryPerfInfo.query_stats",
start_time="now-1h",
end_time="now",
select_fields=["query_stats.rows","query_stats.table","query_stats.time"],
where=[[{"name":"query_stats.rows","value":0,"op":1}]])
json_qstr = json.dumps(query.__dict__)
res = vns.post_query_json(json_qstr)
assert(len(res)>0)
return True
# end verify_where_query
@retry(delay=1, tries=5)
def verify_object_table_query(self):
self.logger.info('verify_object_table_query')
vns = VerificationOpsSrv('127.0.0.1', self.opserver_port)
#ObjectTable query with only ObjectId
self.logger.info('ObjectTable query with only ObjectId')
        object_id = self.collectors[0].hostname
res = vns.post_query('ObjectCollectorInfo',
start_time='-10m', end_time='now',
select_fields=['ObjectId'],
where_clause='ObjectId = %s' % object_id)
if not res:
return False
else:
self.logger.info(res)
for r in res:
assert('ObjectId' in r)
# ObjectTable query with ModuleId specified in where clause
self.logger.info('ObjectTable query with ModuleId in where clause')
        object_id = self.collectors[0].hostname
module = 'Collector'
where_obj_id = 'ObjectId = %s' % object_id
where_mod = 'ModuleId = %s' % module
res = vns.post_query('ObjectCollectorInfo',
start_time='-10m', end_time='now',
select_fields=['ObjectId'],
                             where_clause=where_obj_id + ' AND ' + where_mod)
if not res:
return False
else:
self.logger.info(res)
for r in res:
assert('ObjectId' in r)
return True
# end verify_object_table_query
def cleanUp(self):
try:
self.opserver.stop()
except:
pass
try:
self.query_engine.stop()
except:
pass
for collector in self.collectors:
try:
collector.stop()
except:
pass
for redis_uve in self.redis_uves:
redis_uve.stop()
self.redis_query.stop()
super(AnalyticsFixture, self).cleanUp()
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
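    # Note on get_free_port(): binding to port 0 makes the OS pick an unused
    # ephemeral port; the socket is closed immediately, so there is a small race
    # window in which another process could claim the port before the service
    # binds to it.  The fixture accepts that risk for test purposes.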
@staticmethod
def enable_core():
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except:
pass
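# Illustrative usage (a sketch, not taken from this file): the fixture is meant
# to be consumed from a testtools/fixtures based test case, with logger,
# builddir and cassandra_port supplied by the test environment, e.g.
#
#   analytics = self.useFixture(AnalyticsFixture(logger, builddir, cassandra_port))
#   assert analytics.verify_on_setup()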
| 43.085202
| 126
| 0.556501
|
a59463c45ea3400c9a94d95dc3bc86834963ed9a
| 14,100
|
py
|
Python
|
FASHION/run.py
|
sbuschjaeger/gncl
|
b76664c9d585964f9dcede00265263cb7a545654
|
[
"MIT"
] | 3
|
2021-06-21T12:57:46.000Z
|
2021-12-25T08:31:37.000Z
|
FASHION/run.py
|
sbuschjaeger/gncl
|
b76664c9d585964f9dcede00265263cb7a545654
|
[
"MIT"
] | null | null | null |
FASHION/run.py
|
sbuschjaeger/gncl
|
b76664c9d585964f9dcede00265263cb7a545654
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import pickle
import tarfile
from datetime import datetime
from functools import partial
import argparse
import glob
import numpy as np
import pandas as pd
import torch
import scipy
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from torchsummaryX import summary
# Lets try the newest shit https://github.com/juntang-zhuang/Adabelief-Optimizer
from adabelief_pytorch import AdaBelief
from sklearn.metrics import make_scorer, accuracy_score
from pysembles.Utils import Flatten, Clamp, Scale
from pysembles.Models import Model
from pysembles.E2EEnsembleClassifier import E2EEnsembleClassifier
from pysembles.BaggingClassifier import BaggingClassifier
from pysembles.GNCLClassifier import GNCLClassifier
from pysembles.StackingClassifier import StackingClassifier
from pysembles.DeepDecisionTreeClassifier import DeepDecisionTreeClassifier
from pysembles.SMCLClassifier import SMCLClassifier
from pysembles.GradientBoostedNets import GradientBoostedNets
from pysembles.SnapshotEnsembleClassifier import SnapshotEnsembleClassifier
from pysembles.models.BinarisedNeuralNetworks import BinaryConv2d, BinaryLinear, BinaryTanh
from pysembles.Utils import pytorch_total_params, apply_in_batches, TransformTensorDataset
from experiment_runner.experiment_runner_v2 import run_experiments, get_ctor_arguments
#from ... import MobilenetV3
# sys.path.append("..")
from pysembles.Metrics import accuracy, avg_accurcay, diversity, avg_loss, loss
from pysembles.models.VGG import VGGNet
from pysembles.models.SimpleResNet import SimpleResNet
from pysembles.models.MobileNetV3 import MobileNetV3
from pysembles.models.BinarisedNeuralNetworks import BinaryModel
def train_transformation():
return transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
def test_transformation():
return transforms.Compose([
transforms.ToTensor(),
])
def pre(cfg):
model_ctor = cfg.pop("model")
tmpcfg = cfg
expected = {}
for key in get_ctor_arguments(model_ctor):
if key in tmpcfg:
expected[key] = tmpcfg[key]
model = model_ctor(**expected)
return model
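# Note: pre(cfg) pops the "model" constructor from each config dict and forwards
# only those entries whose names match the constructor's parameters (via
# get_ctor_arguments); config keys that the constructor does not accept are
# simply ignored rather than raising a TypeError.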
def post(cfg, model):
scores = {}
train_loader = torch.utils.data.DataLoader(cfg["train_data"], **cfg["loader"])
scores["train_loss"] = loss(model, train_loader)
scores["train_accuracy"] = accuracy(model, train_loader)
scores["train_diversity"] = diversity(model, train_loader)
scores["train_avg_loss"] = avg_loss(model, train_loader)
scores["train_avg_accurcay"] = avg_accurcay(model, train_loader)
test_loader = torch.utils.data.DataLoader(cfg["test_data"], **cfg["loader"])
scores["test_loss"] = loss(model, test_loader)
scores["test_accuracy"] = accuracy(model, test_loader)
scores["test_diversity"] = diversity(model, test_loader)
scores["test_avg_loss"] = avg_loss(model, test_loader)
scores["test_avg_accurcay"] = avg_accurcay(model, test_loader)
scores["params"] = pytorch_total_params(model)
return scores
def fit(cfg, model):
checkpoints = glob.glob(os.path.join(cfg["out_path"], "*.tar"))
if len(checkpoints) > 0:
print("Found some checkpoints - loading!")
epochs = [ (int(os.path.basename(fname)[:-4].split("_")[1]), fname) for fname in checkpoints]
        # By default, Python compares tuples element-wise, starting with the first entry.
_ , checkpoint_to_load = max(epochs)
print("Loading {}".format(checkpoint_to_load))
checkpoint = torch.load(checkpoint_to_load)
model.restore_checkoint(checkpoint_to_load)
model.epochs = cfg["optimizer"]["epochs"]
model.fit(cfg["train_data"])
return model
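# Illustrative sketch (not part of the original script): how the
# (epoch, filename) tuples built in fit() resolve to the newest checkpoint.
# The file names below are hypothetical.
def _latest_checkpoint_example():
    checkpoints = ["model_5.tar", "model_20.tar", "model_15.tar"]
    epochs = [(int(os.path.basename(f)[:-4].split("_")[1]), f) for f in checkpoints]
    # Tuples compare element-wise, so max() picks the highest epoch.
    _, checkpoint_to_load = max(epochs)
    return checkpoint_to_load  # -> "model_20.tar"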
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--local", help="Run on local machine",action="store_true", default=False)
parser.add_argument("-r", "--ray", help="Run via Ray",action="store_true", default=False)
parser.add_argument("--ray_head", help="Run via Ray",action="store_true", default="auto")
parser.add_argument("--redis_password", help="Run via Ray",action="store_true", default="5241590000000000")
args = parser.parse_args()
if (args.local and args.ray) or (not args.local and not args.ray):
print("Either you specified to use both, ray _and_ local mode or you specified to use none of both. Please choose either. Defaulting to `local` processing.")
args.local = True
if args.local:
basecfg = {
"out_path":os.path.join(datetime.now().strftime('%d-%m-%Y-%H:%M:%S')),
"pre": pre,
"post": post,
"fit": fit,
"backend": "local",
"verbose":False
}
else:
pass
# basecfg = {
# "out_path":os.path.join("FASHION", "results", datetime.now().strftime('%d-%m-%Y-%H:%M:%S')),
# "pre": pre,
# "post": post,
# "fit": fit,
# "backend": "ray",
# "ray_head": args.ray_head,
# "redis_password": args.redis_password,
# "verbose":False
# }
models = []
scheduler = {
"method" : torch.optim.lr_scheduler.StepLR,
"step_size" : 25,
"gamma": 0.5
}
optimizer = {
#"method" : AdaBelief, # torch.optim.Adam, #if "binary" in t else torch.optim.SGD,
"method" : AdaBelief,
"lr" : 1e-2, #1e-3, #if "binary" in t else 0.1,
# "momentum" : 0.9,
# "nesterov" : True,
# "weight_decay" : 1e-4,
"epochs" : 150,
"eps" : 1e-12,
"betas" : (0.9,0.999)
}
loader = {
"num_workers": 1,
"batch_size" : 256,
"pin_memory": True
}
def simpleresnet(size, model_type):
if "small" == size:
n_channels = 32
depth = 4
else:
n_channels = 96
depth = 4
if "binary" == model_type:
return BinaryModel(SimpleResNet(in_channels = 1, n_channels = n_channels, depth = depth, num_classes=10), keep_activation=True)
else:
return SimpleResNet(in_channels = 1, n_channels = n_channels, depth = depth, num_classes=10)
def stacking_classifier(model_type):
classifier = torch.nn.Linear(16*10,10)
if "binary" == model_type:
return BinaryModel(classifier, keep_activation=True)
else:
return classifier
for s in ["small","large"]:
for t in ["binary","float"]:
models.append(
{
"model":Model,
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation(), download = True),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation(), download = True),
"verbose":True
}
)
for m in [16]:
models.append(
{
"model":StackingClassifier,
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"classifier" : partial(stacking_classifier, model_type=t),
"n_estimators":m,
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
models.append(
{
"model":BaggingClassifier,
"n_estimators":m,
"train_method":"fast",
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
models.append(
{
"model":GradientBoostedNets,
"n_estimators":m,
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
models.append(
{
"model":SnapshotEnsembleClassifier,
"n_estimators":m,
"list_of_snapshots":[2,3,4,5,10,15,20,25,30,40,50,60,70,80,90],
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
models.append(
{
"model":E2EEnsembleClassifier,
"n_estimators":m,
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
models.append(
{
"model":SMCLClassifier,
"n_estimators":m,
"combination_type":"best",
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
for l_reg in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
models.append(
{
"model":GNCLClassifier,
"n_estimators":m,
"mode":"upper",
"l_reg":l_reg,
"combination_type":"average",
"base_estimator": partial(simpleresnet, size=s, model_type=t),
"optimizer":optimizer,
"scheduler":scheduler,
"loader":loader,
"eval_every":5,
"store_every":0,
"loss_function":nn.CrossEntropyLoss(reduction="none"),
"use_amp":True,
"device":"cuda",
"train_data": torchvision.datasets.FashionMNIST(".", train=True, transform = train_transformation()),
"test_data": torchvision.datasets.FashionMNIST(".", train=False, transform = test_transformation()),
"verbose":True
}
)
try:
base = models[0]["base_estimator"]().cuda()
rnd_input = torch.rand((1, 1, 28, 28)).cuda()
print(summary(base, rnd_input))
except:
pass
run_experiments(basecfg, models)
| 38.419619
| 161
| 0.57227
|
6464f9c5c882d6bc2fa7f2b1cac364997f4cb200
| 28,894
|
py
|
Python
|
scripts/dts/extract_dts_includes.py
|
gaoxiang89/zephyr
|
5925112ee602e55bd62c92d6506f94f96cbc8c21
|
[
"Apache-2.0"
] | 1
|
2021-01-22T15:32:40.000Z
|
2021-01-22T15:32:40.000Z
|
scripts/dts/extract_dts_includes.py
|
cpeniche/zephyr
|
1e91447b99eb8ee93fbc8a3e4a5247ff7e092a12
|
[
"Apache-2.0"
] | null | null | null |
scripts/dts/extract_dts_includes.py
|
cpeniche/zephyr
|
1e91447b99eb8ee93fbc8a3e4a5247ff7e092a12
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# vim: ai:ts=4:sw=4
import sys
from os import listdir
import os, fnmatch
import re
import yaml
import argparse
import collections
from devicetree import parse_file
# globals
compatibles = {}
phandles = {}
aliases = {}
chosen = {}
reduced = {}
def convert_string_to_label(s):
    # Transmute ',', '-' and '@' to '_'
s = s.replace("-", "_")
s = s.replace(",", "_")
s = s.replace("@", "_")
return s
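# Example: convert_string_to_label("st,stm32-usart@40011000")
# returns "st_stm32_usart_40011000".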
def get_all_compatibles(d, name, comp_dict):
if 'props' in d:
compat = d['props'].get('compatible')
enabled = d['props'].get('status')
if enabled == "disabled":
return comp_dict
if compat is not None:
comp_dict[name] = compat
if name != '/':
name += '/'
if isinstance(d, dict):
if d['children']:
for k, v in d['children'].items():
get_all_compatibles(v, name + k, comp_dict)
return comp_dict
def get_aliases(root):
if 'children' in root:
if 'aliases' in root['children']:
for k, v in root['children']['aliases']['props'].items():
aliases[v] = k
return
def get_compat(node):
compat = None
if 'props' in node:
compat = node['props'].get('compatible')
if isinstance(compat, list):
compat = compat[0]
return compat
def get_chosen(root):
if 'children' in root:
if 'chosen' in root['children']:
for k, v in root['children']['chosen']['props'].items():
chosen[k] = v
return
def get_phandles(root, name, handles):
if 'props' in root:
handle = root['props'].get('phandle')
enabled = root['props'].get('status')
if enabled == "disabled":
return
if handle is not None:
phandles[handle] = name
if name != '/':
name += '/'
if isinstance(root, dict):
if root['children']:
for k, v in root['children'].items():
get_phandles(v, name + k, handles)
return
class Loader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.realpath(stream.name)
super(Loader, self).__init__(stream)
Loader.add_constructor('!include', Loader.include)
Loader.add_constructor('!import', Loader.include)
def include(self, node):
if isinstance(node, yaml.ScalarNode):
return self.extractFile(self.construct_scalar(node))
elif isinstance(node, yaml.SequenceNode):
result = []
for filename in self.construct_sequence(node):
result += self.extractFile(filename)
return result
elif isinstance(node, yaml.MappingNode):
result = {}
for k, v in self.construct_mapping(node).iteritems():
result[k] = self.extractFile(v)
return result
else:
print("Error:: unrecognised node type in !include statement")
raise yaml.constructor.ConstructorError
def extractFile(self, filename):
filepath = os.path.join(os.path.dirname(self._root), filename)
if not os.path.isfile(filepath):
# we need to look in bindings/* directories
# take path and back up 1 directory and parse in '/bindings/*'
filepath = os.path.dirname(self._root).split('/')
filepath = '/'.join(filepath[:-1])
for root, dirnames, file in os.walk(filepath):
if fnmatch.filter(file, filename):
filepath = os.path.join(root, filename)
with open(filepath, 'r') as f:
return yaml.load(f, Loader)
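# Illustrative sketch (not part of the original script): with the '!include'
# constructor registered above, a hypothetical binding such as
#
#   inherits:
#     !include uart.yaml
#
# is resolved by extractFile(), which looks next to the including file first
# and then walks the sibling bindings directories before parsing the target
# with this same Loader.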
def insert_defs(node_address, defs, new_defs, new_aliases):
if node_address in defs:
if 'aliases' in defs[node_address]:
defs[node_address]['aliases'].update(new_aliases)
else:
defs[node_address]['aliases'] = new_aliases
defs[node_address].update(new_defs)
else:
new_defs['aliases'] = new_aliases
defs[node_address] = new_defs
return
def find_node_by_path(nodes, path):
d = nodes
for k in path[1:].split('/'):
d = d['children'][k]
return d
def compress_nodes(nodes, path):
if 'props' in nodes:
status = nodes['props'].get('status')
if status == "disabled":
return
if isinstance(nodes, dict):
reduced[path] = dict(nodes)
reduced[path].pop('children', None)
if path != '/':
path += '/'
if nodes['children']:
for k, v in nodes['children'].items():
compress_nodes(v, path + k)
return
def find_parent_irq_node(node_address):
address = ''
for comp in node_address.split('/')[1:]:
address += '/' + comp
if 'interrupt-parent' in reduced[address]['props']:
interrupt_parent = reduced[address]['props'].get(
'interrupt-parent')
return reduced[phandles[interrupt_parent]]
def find_parent_prop(node_address, prop):
parent_address = ''
for comp in node_address.split('/')[1:-1]:
parent_address += '/' + comp
if prop in reduced[parent_address]['props']:
parent_prop = reduced[parent_address]['props'].get(prop)
else:
raise Exception("Parent of node " + node_address +
" has no " + prop + " property")
return parent_prop
def extract_interrupts(node_address, yaml, y_key, names, defs, def_label):
node = reduced[node_address]
try:
props = list(node['props'].get(y_key))
except:
props = [node['props'].get(y_key)]
irq_parent = find_parent_irq_node(node_address)
l_base = def_label.split('/')
index = 0
while props:
prop_def = {}
prop_alias = {}
l_idx = [str(index)]
if y_key == 'interrupts-extended':
cell_parent = reduced[phandles[props.pop(0)]]
name = []
else:
try:
name = [convert_string_to_label(names.pop(0)).upper()]
except:
name = []
cell_parent = irq_parent
cell_yaml = yaml[get_compat(cell_parent)]
l_cell_prefix = [yaml[get_compat(irq_parent)].get(
'cell_string', []).upper()]
for i in range(cell_parent['props']['#interrupt-cells']):
l_cell_name = [cell_yaml['#cells'][i].upper()]
if l_cell_name == l_cell_prefix:
l_cell_name = []
l_fqn = '_'.join(l_base + l_cell_prefix + l_idx + l_cell_name)
prop_def[l_fqn] = props.pop(0)
if len(name):
alias_list = l_base + l_cell_prefix + name + l_cell_name
prop_alias['_'.join(alias_list)] = l_fqn
index += 1
insert_defs(node_address, defs, prop_def, prop_alias)
return
def extract_reg_prop(node_address, names, defs, def_label, div, post_label):
reg = reduced[node_address]['props']['reg']
if type(reg) is not list: reg = [ reg ]
props = list(reg)
address_cells = reduced['/']['props'].get('#address-cells')
size_cells = reduced['/']['props'].get('#size-cells')
address = ''
for comp in node_address.split('/')[1:-1]:
address += '/' + comp
address_cells = reduced[address]['props'].get(
'#address-cells', address_cells)
size_cells = reduced[address]['props'].get('#size-cells', size_cells)
if post_label is None:
post_label = "BASE_ADDRESS"
index = 0
l_base = def_label.split('/')
l_addr = [convert_string_to_label(post_label).upper()]
l_size = ["SIZE"]
while props:
prop_def = {}
prop_alias = {}
addr = 0
size = 0
l_idx = [str(index)]
try:
name = [names.pop(0).upper()]
except:
name = []
for x in range(address_cells):
addr += props.pop(0) << (32 * x)
for x in range(size_cells):
size += props.pop(0) << (32 * x)
l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
l_size_fqn = '_'.join(l_base + l_size + l_idx)
if address_cells:
prop_def[l_addr_fqn] = hex(addr)
if size_cells:
prop_def[l_size_fqn] = int(size / div)
if len(name):
if address_cells:
prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
if size_cells:
prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn
if index == 0:
if address_cells:
prop_alias['_'.join(l_base + l_addr)] = l_addr_fqn
if size_cells:
prop_alias['_'.join(l_base + l_size)] = l_size_fqn
insert_defs(node_address, defs, prop_def, prop_alias)
# increment index for definition creation
index += 1
return
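# Illustrative sketch (not part of the original script): how the cell-folding
# loops above combine multi-cell values. With two hypothetical address cells
# [0x1000, 0x2], the first popped cell is shifted by 0 bits and the second by
# 32 bits.
def _fold_cells_example():
    cells = [0x1000, 0x2]
    addr = 0
    for x in range(2):
        addr += cells.pop(0) << (32 * x)
    return hex(addr)  # -> '0x200001000'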
def extract_cells(node_address, yaml, y_key, names, index, prefix, defs,
def_label):
try:
props = list(reduced[node_address]['props'].get(y_key))
except:
props = [reduced[node_address]['props'].get(y_key)]
cell_parent = reduced[phandles[props.pop(0)]]
try:
cell_yaml = yaml[get_compat(cell_parent)]
except:
raise Exception(
"Could not find yaml description for " + cell_parent['name'])
try:
name = names.pop(0).upper()
except:
name = []
l_cell = [str(cell_yaml.get('cell_string', ''))]
l_base = def_label.split('/')
l_base += prefix
l_idx = [str(index)]
prop_def = {}
prop_alias = {}
for k in cell_parent['props'].keys():
if k[0] == '#' and '-cells' in k:
for i in range(cell_parent['props'].get(k)):
l_cellname = [str(cell_yaml['#cells'][i]).upper()]
if l_cell == l_cellname:
label = l_base + l_cell + l_idx
else:
label = l_base + l_cell + l_cellname + l_idx
label_name = l_base + name + l_cellname
prop_def['_'.join(label)] = props.pop(0)
if len(name):
prop_alias['_'.join(label_name)] = '_'.join(label)
if index == 0:
prop_alias['_'.join(label[:-1])] = '_'.join(label)
insert_defs(node_address, defs, prop_def, prop_alias)
# recurse if we have anything left
if len(props):
extract_cells(node_address, yaml, y_key, names,
index + 1, prefix, defs, def_label)
return
def extract_pinctrl(node_address, yaml, pinconf, names, index, defs,
def_label):
prop_list = []
if not isinstance(pinconf, list):
prop_list.append(pinconf)
else:
prop_list = list(pinconf)
def_prefix = def_label.split('_')
prop_def = {}
for p in prop_list:
pin_node_address = phandles[p]
parent_address = '/'.join(pin_node_address.split('/')[:-1])
pin_subnode = '/'.join(pin_node_address.split('/')[-1:])
pin_parent = reduced[parent_address]
cell_yaml = yaml[get_compat(pin_parent)]
cell_prefix = cell_yaml.get('cell_string', None)
post_fix = []
if cell_prefix is not None:
post_fix.append(cell_prefix)
for subnode in reduced.keys():
if pin_subnode in subnode and pin_node_address != subnode:
# found a subnode underneath the pinmux handle
pin_label = def_prefix + post_fix + subnode.split('/')[-2:]
for i, cells in enumerate(reduced[subnode]['props']):
key_label = list(pin_label) + \
[cell_yaml['#cells'][0]] + [str(i)]
func_label = key_label[:-2] + \
[cell_yaml['#cells'][1]] + [str(i)]
key_label = convert_string_to_label(
'_'.join(key_label)).upper()
func_label = convert_string_to_label(
'_'.join(func_label)).upper()
prop_def[key_label] = cells
prop_def[func_label] = \
reduced[subnode]['props'][cells]
insert_defs(node_address, defs, prop_def, {})
def extract_single(node_address, yaml, prop, key, prefix, defs, def_label):
prop_def = {}
if isinstance(prop, list):
for i, p in enumerate(prop):
k = convert_string_to_label(key).upper()
label = def_label + '_' + k
if isinstance(p, str):
p = "\"" + p + "\""
prop_def[label + '_' + str(i)] = p
else:
k = convert_string_to_label(key).upper()
label = def_label + '_' + k
if prop == 'parent-label':
prop = find_parent_prop(node_address, 'label')
if isinstance(prop, str):
prop = "\"" + prop + "\""
prop_def[label] = prop
if node_address in defs:
defs[node_address].update(prop_def)
else:
defs[node_address] = prop_def
return
def extract_string_prop(node_address, yaml, key, label, defs):
prop_def = {}
node = reduced[node_address]
prop = node['props'][key]
k = convert_string_to_label(key).upper()
prop_def[label] = "\"" + prop + "\""
if node_address in defs:
defs[node_address].update(prop_def)
else:
defs[node_address] = prop_def
return
def get_node_label(node_compat, node_address):
def_label = convert_string_to_label(node_compat.upper())
if '@' in node_address:
def_label += '_' + node_address.split('@')[-1].upper()
else:
def_label += convert_string_to_label(node_address.upper())
return def_label
def extract_property(node_compat, yaml, node_address, y_key, y_val, names,
prefix, defs, label_override):
if 'base_label' in yaml[node_compat]:
def_label = yaml[node_compat].get('base_label')
else:
def_label = get_node_label(node_compat, node_address)
if 'parent' in yaml[node_compat]:
if 'bus' in yaml[node_compat]['parent']:
# get parent label
parent_address = ''
for comp in node_address.split('/')[1:-1]:
parent_address += '/' + comp
#check parent has matching child bus value
try:
parent_yaml = \
yaml[reduced[parent_address]['props']['compatible']]
parent_bus = parent_yaml['child']['bus']
except (KeyError, TypeError) as e:
raise Exception(str(node_address) + " defines parent " +
str(parent_address) + " as bus master but " +
str(parent_address) + " not configured as bus master " +
"in yaml description")
if parent_bus != yaml[node_compat]['parent']['bus']:
bus_value = yaml[node_compat]['parent']['bus']
raise Exception(str(node_address) + " defines parent " +
str(parent_address) + " as " + bus_value +
" bus master but " + str(parent_address) +
" configured as " + str(parent_bus) +
" bus master")
# Use parent label to generate label
parent_label = get_node_label(
find_parent_prop(node_address,'compatible') , parent_address)
def_label = parent_label + '_' + def_label
# Generate bus-name define
extract_single(node_address, yaml, 'parent-label',
'bus-name', prefix, defs, def_label)
if label_override is not None:
def_label += '_' + label_override
if y_key == 'reg':
extract_reg_prop(node_address, names, defs, def_label,
1, y_val.get('label', None))
    elif y_key == 'interrupts' or y_key == 'interrupts-extended':
extract_interrupts(node_address, yaml, y_key, names, defs, def_label)
elif 'pinctrl-' in y_key:
p_index = int(y_key.split('-')[1])
extract_pinctrl(node_address, yaml,
reduced[node_address]['props'][y_key],
names[p_index], p_index, defs, def_label)
elif 'clocks' in y_key:
extract_cells(node_address, yaml, y_key,
names, 0, prefix, defs, def_label)
else:
extract_single(node_address, yaml,
reduced[node_address]['props'][y_key], y_key,
prefix, defs, def_label)
return
def extract_node_include_info(reduced, root_node_address, sub_node_address,
yaml, defs, structs, y_sub):
node = reduced[sub_node_address]
node_compat = get_compat(reduced[root_node_address])
label_override = None
if node_compat not in yaml.keys():
return {}, {}
if y_sub is None:
y_node = yaml[node_compat]
else:
y_node = y_sub
if yaml[node_compat].get('use-property-label', False):
try:
label = y_node['properties']['label']
label_override = convert_string_to_label(
node['props']['label']).upper()
except KeyError:
pass
# check to see if we need to process the properties
for k, v in y_node['properties'].items():
if 'properties' in v:
for c in reduced:
if root_node_address + '/' in c:
extract_node_include_info(
reduced, root_node_address, c, yaml, defs, structs,
v)
if 'generation' in v:
prefix = []
if v.get('use-name-prefix') is not None:
prefix = [convert_string_to_label(k.upper())]
for c in node['props'].keys():
if c.endswith("-names"):
pass
if re.match(k + '$', c):
if 'pinctrl-' in c:
names = node['props'].get('pinctrl-names', [])
else:
names = node['props'].get(c[:-1] + '-names', [])
if not names:
names = node['props'].get(c + '-names', [])
if not isinstance(names, list):
names = [names]
extract_property(
node_compat, yaml, sub_node_address, c, v, names,
prefix, defs, label_override)
return
def dict_merge(dct, merge_dct):
# from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
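# Illustrative sketch (not part of the original script): dict_merge() overlays
# nested keys instead of replacing whole sub-dicts. The dictionaries below are
# hypothetical.
def _dict_merge_example():
    base = {"title": "base", "properties": {"reg": {"type": "array"}}}
    overlay = {"title": "child", "properties": {"label": {"type": "string"}}}
    dict_merge(base, overlay)
    # base is now:
    # {"title": "child",
    #  "properties": {"reg": {"type": "array"}, "label": {"type": "string"}}}
    return base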
def yaml_traverse_inherited(node):
""" Recursive overload procedure inside ``node``
``inherits`` section is searched for and used as node base when found.
Base values are then overloaded by node values
:param node:
:return: node
"""
if 'inherits' in node.keys():
if 'inherits' in node['inherits'].keys():
node['inherits'] = yaml_traverse_inherited(node['inherits'])
dict_merge(node['inherits'], node)
node = node['inherits']
node.pop('inherits')
return node
def yaml_collapse(yaml_list):
collapsed = dict(yaml_list)
for k, v in collapsed.items():
v = yaml_traverse_inherited(v)
collapsed[k]=v
return collapsed
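# Illustrative sketch (not part of the original script): how yaml_collapse()
# folds an 'inherits' section into the node that includes it. The values are
# hypothetical; node-specific keys win over inherited ones.
def _yaml_collapse_example():
    node = {
        "title": "STM32 UART",
        "inherits": {"title": "Generic UART", "properties": {"reg": {}}},
    }
    collapsed = yaml_collapse({"st,stm32-usart": node})
    # collapsed["st,stm32-usart"] ==
    # {"title": "STM32 UART", "properties": {"reg": {}}}
    return collapsed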
def print_key_value(k, v, tabstop):
label = "#define " + k
# calculate the name's tabs
if len(label) % 8:
tabs = (len(label) + 7) >> 3
else:
tabs = (len(label) >> 3) + 1
sys.stdout.write(label)
for i in range(0, tabstop - tabs + 1):
sys.stdout.write('\t')
sys.stdout.write(str(v))
sys.stdout.write("\n")
return
def generate_keyvalue_file(defs, args):
node_keys = sorted(defs.keys())
for node in node_keys:
sys.stdout.write('# ' + node.split('/')[-1])
sys.stdout.write("\n")
prop_keys = sorted(defs[node].keys())
for prop in prop_keys:
if prop == 'aliases':
for entry in sorted(defs[node][prop]):
a = defs[node][prop].get(entry)
sys.stdout.write("%s=%s\n" % (entry, defs[node].get(a)))
else:
sys.stdout.write("%s=%s\n" % (prop, defs[node].get(prop)))
sys.stdout.write("\n")
def generate_include_file(defs, args):
compatible = reduced['/']['props']['compatible'][0]
sys.stdout.write("/**************************************************\n")
sys.stdout.write(" * Generated include file for " + compatible)
sys.stdout.write("\n")
sys.stdout.write(" * DO NOT MODIFY\n")
sys.stdout.write(" */\n")
sys.stdout.write("\n")
sys.stdout.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n")
sys.stdout.write("#define _DEVICE_TREE_BOARD_H" + "\n")
sys.stdout.write("\n")
node_keys = sorted(defs.keys())
for node in node_keys:
sys.stdout.write('/* ' + node.split('/')[-1] + ' */')
sys.stdout.write("\n")
max_dict_key = lambda d: max(len(k) for k in d.keys())
maxlength = 0
if defs[node].get('aliases'):
maxlength = max_dict_key(defs[node]['aliases'])
maxlength = max(maxlength, max_dict_key(defs[node])) + len('#define ')
if maxlength % 8:
maxtabstop = (maxlength + 7) >> 3
else:
maxtabstop = (maxlength >> 3) + 1
if (maxtabstop * 8 - maxlength) <= 2:
maxtabstop += 1
prop_keys = sorted(defs[node].keys())
for prop in prop_keys:
if prop == 'aliases':
for entry in sorted(defs[node][prop]):
a = defs[node][prop].get(entry)
print_key_value(entry, a, maxtabstop)
else:
print_key_value(prop, defs[node].get(prop), maxtabstop)
sys.stdout.write("\n")
if args.fixup:
for fixup in args.fixup:
if os.path.exists(fixup):
sys.stdout.write("\n")
sys.stdout.write(
"/* Following definitions fixup the generated include */\n")
try:
with open(fixup, "r") as fd:
for line in fd.readlines():
sys.stdout.write(line)
sys.stdout.write("\n")
except:
raise Exception(
"Input file " + os.path.abspath(fixup) +
" does not exist.")
sys.stdout.write("#endif\n")
def lookup_defs(defs, node, key):
if node not in defs:
return None
if key in defs[node]['aliases']:
key = defs[node]['aliases'][key]
return defs[node].get(key, None)
def parse_arguments():
rdh = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=rdh)
parser.add_argument("-d", "--dts", help="DTS file")
parser.add_argument("-y", "--yaml", help="YAML file")
parser.add_argument("-f", "--fixup", action="append",
help="Fixup file, we allow multiple")
parser.add_argument("-k", "--keyvalue", action="store_true",
help="Generate include file for the build system")
return parser.parse_args()
def main():
args = parse_arguments()
if not args.dts or not args.yaml:
print('Usage: %s -d filename.dts -y path_to_yaml' % sys.argv[0])
return 1
try:
with open(args.dts, "r") as fd:
d = parse_file(fd)
except:
raise Exception(
"Input file " + os.path.abspath(args.dts) + " does not exist.")
# compress list to nodes w/ paths, add interrupt parent
compress_nodes(d['/'], '/')
# build up useful lists
compatibles = get_all_compatibles(d['/'], '/', {})
get_phandles(d['/'], '/', {})
get_aliases(d['/'])
get_chosen(d['/'])
# find unique set of compatibles across all active nodes
s = set()
for k, v in compatibles.items():
if isinstance(v, list):
for item in v:
s.add(item)
else:
s.add(v)
# scan YAML files and find the ones we are interested in
yaml_files = []
for root, dirnames, filenames in os.walk(args.yaml):
for filename in fnmatch.filter(filenames, '*.yaml'):
yaml_files.append(os.path.join(root, filename))
yaml_list = {}
file_load_list = set()
for file in yaml_files:
for line in open(file, 'r'):
if re.search('^\s+constraint:*', line):
c = line.split(':')[1].strip()
c = c.strip('"')
if c in s:
if file not in file_load_list:
file_load_list.add(file)
with open(file, 'r') as yf:
yaml_list[c] = yaml.load(yf, Loader)
if yaml_list == {}:
raise Exception("Missing YAML information. Check YAML sources")
# collapse the yaml inherited information
yaml_list = yaml_collapse(yaml_list)
defs = {}
structs = {}
for k, v in reduced.items():
node_compat = get_compat(v)
if node_compat is not None and node_compat in yaml_list:
extract_node_include_info(
reduced, k, k, yaml_list, defs, structs, None)
if defs == {}:
raise Exception("No information parsed from dts file.")
if 'zephyr,flash' in chosen:
node_addr = chosen['zephyr,flash']
extract_reg_prop(chosen['zephyr,flash'], None,
defs, "CONFIG_FLASH", 1024, None)
flash_keys = ["label", "write-block-size", "erase-block-size"]
for key in flash_keys:
if key in reduced[node_addr]['props']:
prop = reduced[node_addr]['props'][key]
extract_single(node_addr, None, prop, key, None, defs, "FLASH")
else:
# We will add address/size of 0 for systems with no flash controller
# This is what they already do in the Kconfig options anyway
defs['dummy-flash'] = {'CONFIG_FLASH_BASE_ADDRESS': 0,
'CONFIG_FLASH_SIZE': 0}
if 'zephyr,sram' in chosen:
extract_reg_prop(chosen['zephyr,sram'], None,
defs, "CONFIG_SRAM", 1024, None)
if 'zephyr,ccm' in chosen:
extract_reg_prop(chosen['zephyr,ccm'], None,
defs, "CONFIG_CCM", 1024, None)
name_dict = {
"CONFIG_UART_CONSOLE_ON_DEV_NAME": "zephyr,console",
"CONFIG_BT_UART_ON_DEV_NAME": "zephyr,bt-uart",
"CONFIG_UART_PIPE_ON_DEV_NAME": "zephyr,uart-pipe",
"CONFIG_BT_MONITOR_ON_DEV_NAME": "zephyr,bt-mon-uart",
"CONFIG_UART_MCUMGR_ON_DEV_NAME": "zephyr,uart-mcumgr",
}
for k, v in name_dict.items():
if v in chosen:
extract_string_prop(chosen[v], None, "label", k, defs)
# only compute the load offset if a code partition exists and it is not the
# same as the flash base address
load_defs = {}
if 'zephyr,code-partition' in chosen and \
'zephyr,flash' in chosen and \
reduced[chosen['zephyr,flash']] is not \
reduced[chosen['zephyr,code-partition']]:
part_defs = {}
extract_reg_prop(chosen['zephyr,code-partition'], None, part_defs,
"PARTITION", 1, 'offset')
part_base = lookup_defs(part_defs, chosen['zephyr,code-partition'],
'PARTITION_OFFSET')
load_defs['CONFIG_FLASH_LOAD_OFFSET'] = part_base
load_defs['CONFIG_FLASH_LOAD_SIZE'] = \
lookup_defs(part_defs, chosen['zephyr,code-partition'],
'PARTITION_SIZE')
else:
load_defs['CONFIG_FLASH_LOAD_OFFSET'] = 0
load_defs['CONFIG_FLASH_LOAD_SIZE'] = 0
insert_defs(chosen['zephyr,flash'], defs, load_defs, {})
# generate include file
if args.keyvalue:
generate_keyvalue_file(defs, args)
else:
generate_include_file(defs, args)
if __name__ == '__main__':
main()
| 31.440696
| 80
| 0.551741
|
6e25eb9849a2464bbd0b335e4a4b7d31cdea73e4
| 4,433
|
py
|
Python
|
denguefever_tw/dengue_linebot/models.py
|
NCKU-CCS/line_bot_server
|
954ac77640466f625cc52c2ca8bacd37e87517a3
|
[
"MIT"
] | 3
|
2016-12-31T15:06:44.000Z
|
2017-10-14T16:25:02.000Z
|
denguefever_tw/dengue_linebot/models.py
|
NCKU-CCS/line_bot_server
|
954ac77640466f625cc52c2ca8bacd37e87517a3
|
[
"MIT"
] | 8
|
2017-06-02T14:21:59.000Z
|
2021-06-09T17:41:54.000Z
|
denguefever_tw/dengue_linebot/models.py
|
NCKU-CCS/line_bot_server
|
954ac77640466f625cc52c2ca8bacd37e87517a3
|
[
"MIT"
] | 3
|
2017-05-26T06:32:59.000Z
|
2017-07-18T01:27:03.000Z
|
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
import logging
logger = logging.getLogger(__name__)
class LineUser(models.Model):
user_id = models.TextField(primary_key=True)
name = models.TextField()
picture_url = models.TextField(blank=True)
status_message = models.TextField(blank=True)
language = models.TextField(default='zh_tw')
lng = models.FloatField(default=0.0)
lat = models.FloatField(default=0.0)
location = models.ForeignKey('MinArea', null=True, on_delete=models.SET_NULL)
zapper_id = models.TextField(null=True)
def save(self, *args, **kwargs):
if self.lng and self.lat:
try:
self.location = MinArea.objects.get(area__contains=Point(float(self.lng), float(self.lat)))
except MinArea.DoesNotExist:
logger.error('The location of the user can not match any minarea')
super(LineUser, self).save(*args, **kwargs)
def __str__(self):
return '{name} ({user_id})'.format(
name=self.name,
user_id=self.user_id
)
class MinArea(models.Model):
area_id = models.TextField()
area_sn = models.TextField(primary_key=True)
area_name = models.TextField(null=True)
district_name = models.TextField(null=True)
area = models.PolygonField(srid=4326)
def __str__(self):
return ' {district} {area}'.format(
district=self.district_name,
area=self.area_name
)
class Suggestion(models.Model):
content = models.TextField()
user = models.ForeignKey(LineUser, related_name='suggestion', on_delete=models.CASCADE)
def __str__(self):
return '{user}: {content}'.format(
user=self.user.name,
content=self.content
)
class MessageLog(models.Model):
speaker = models.ForeignKey(LineUser, related_name='message_log', on_delete=models.CASCADE)
speak_time = models.DateTimeField()
message_type = models.TextField()
content = models.TextField(null=True, blank=True)
def __str__(self):
speaker = self.speaker
try:
user = LineUser.objects.get(user_id=self.speaker)
speaker = user.name
except LineUser.DoesNotExist:
pass
return '{speak_time}\n{message_type}\n {speaker}: {content}'.format(
speaker=speaker,
message_type=self.message_type,
speak_time=self.speak_time,
content=self.content
)
class BotReplyLog(models.Model):
receiver = models.ForeignKey(LineUser, related_name='bot_reply_log', on_delete=models.CASCADE)
speak_time = models.DateTimeField()
message_type = models.TextField()
content = models.TextField(null=True, blank=True)
def __repr__(self):
return '{speak_time}\n{message_type}\n BOT (to {receiver}): {content}'.format(
receiver=self.receiver,
message_type=self.message_type,
speak_time=self.speak_time,
content=self.content
)
class UnrecognizedMsg(models.Model):
message_log = models.ForeignKey(MessageLog, related_name='unrecognized_message_log', on_delete=models.CASCADE)
def __str__(self):
return str(self.message_log)
class ResponseToUnrecogMsg(models.Model):
unrecognized_msg_content = models.TextField(unique=True)
content = models.TextField()
def __str__(self):
return 'Unrecognized Message: {unrecog_msg}\nResponse: {proper_response}'.format(
unrecog_msg=self.unrecognized_msg_content,
proper_response=self.content
)
class GovReport(models.Model):
user = models.ForeignKey(LineUser, related_name='gov_faculty', on_delete=models.CASCADE)
action = models.TextField()
note = models.TextField()
report_time = models.DateTimeField()
lng = models.FloatField(default=0.0)
lat = models.FloatField(default=0.0)
location = models.PointField(geography=True, srid=4326, default='POINT(0.0 0.0)')
def save(self, **kwargs):
if self.lng and self.lat:
self.location = Point(float(self.lng), float(self.lat))
super(GovReport, self).save(**kwargs)
class ReportZapperMsg(models.Model):
reporter = models.ForeignKey(LineUser, related_name='report_zapper_msg', on_delete=models.CASCADE)
report_time = models.DateTimeField()
content = models.TextField()
| 33.08209
| 114
| 0.671329
|
1d4fa33816e8567379cf4e4af4c2df73d215d590
| 113
|
py
|
Python
|
play/backupFirebaseDB.py
|
WorldViews/FlowerGarden
|
af274812bd4de1b0bb1e1f17898cc2f7853c65a1
|
[
"CC0-1.0"
] | 2
|
2020-06-05T07:50:29.000Z
|
2020-06-05T20:53:55.000Z
|
play/backupFirebaseDB.py
|
WorldViews/FlowerGarden
|
af274812bd4de1b0bb1e1f17898cc2f7853c65a1
|
[
"CC0-1.0"
] | 4
|
2020-07-07T16:51:39.000Z
|
2021-03-08T03:11:50.000Z
|
play/backupFirebaseDB.py
|
WorldViews/FlowerGarden
|
af274812bd4de1b0bb1e1f17898cc2f7853c65a1
|
[
"CC0-1.0"
] | 1
|
2020-07-27T08:29:19.000Z
|
2020-07-27T08:29:19.000Z
|
from FireTopics import FireDB
fdb = FireDB()
#fdb.dump("firebase.db.json")
fdb.saveDB("./firebaseDB.bak.json")
| 16.142857
| 35
| 0.725664
|
7ecee44cce0a1b39c5897bfd60ca5c413207a46d
| 996
|
py
|
Python
|
Back-End/Python/External Libraries/Flask/Flask_Fast-Food/Simple-Form/wtf_form.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25
|
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/External Libraries/Flask/Flask_Fast-Food/Simple-Form/wtf_form.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1
|
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/External Libraries/Flask/Flask_Fast-Food/Simple-Form/wtf_form.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15
|
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
from wtforms import Form, BooleanField, StringField, PasswordField, validators
# Note: the view below also relies on a Flask `app`, a `User` model and a
# `db_session`, which are assumed to be defined elsewhere in the application.
from flask import request, flash, redirect, url_for, render_template
class RegistrationForm(Form):
username = StringField('Username', [validators.Length(min=4, max=25)])
email = StringField('Email Address', [validators.Length(min=6, max=35)])
password = PasswordField('New Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat Password')
accept_tos = BooleanField('I accept the TOS', [validators.DataRequired()])
# In the view function
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm(request.form)
if request.method == 'POST' and form.validate():
user = User(form.username.data, form.email.data,
form.password.data)
db_session.add(user)
flash('Thanks for registering')
return redirect(url_for('login'))
return render_template('register.html', form=form)
| 38.307692
| 78
| 0.678715
|
afc1e4bd5aec36d7dc5d7aa304eec1627c61f5d8
| 77,215
|
py
|
Python
|
python/ray/worker.py
|
amitsadaphule/ray
|
0b9fbc1c0f767e14b80e5bbf92b898ed6c9e1cff
|
[
"Apache-2.0"
] | null | null | null |
python/ray/worker.py
|
amitsadaphule/ray
|
0b9fbc1c0f767e14b80e5bbf92b898ed6c9e1cff
|
[
"Apache-2.0"
] | null | null | null |
python/ray/worker.py
|
amitsadaphule/ray
|
0b9fbc1c0f767e14b80e5bbf92b898ed6c9e1cff
|
[
"Apache-2.0"
] | null | null | null |
from contextlib import contextmanager
import colorama
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import signal
from six.moves import queue
import sys
import threading
import time
import traceback
import random
# Ray modules
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray.memory_monitor as memory_monitor
import ray.node
import ray.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray.services as services
import ray
import setproctitle
import ray.signature
import ray.state
from ray import (
ActorID,
JobID,
ObjectID,
Language,
)
from ray import import_thread
from ray import profiling
from ray.exceptions import (
RayConnectionError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray.function_manager import FunctionActorManager
from ray.utils import (
_random_string,
check_oversized_pickle,
is_cython,
setup_logger,
)
from ray.local_mode_manager import LocalModeManager
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
class ActorCheckpointInfo:
"""Information used to maintain actor checkpoints."""
__slots__ = [
# Number of tasks executed since last checkpoint.
"num_tasks_since_last_checkpoint",
# Timestamp of the last checkpoint, in milliseconds.
"last_checkpoint_timestamp",
# IDs of the previous checkpoints.
"checkpoint_ids",
]
def __init__(self, num_tasks_since_last_checkpoint,
last_checkpoint_timestamp, checkpoint_ids):
self.num_tasks_since_last_checkpoint = num_tasks_since_last_checkpoint
self.last_checkpoint_timestamp = last_checkpoint_timestamp
self.checkpoint_ids = checkpoint_ids
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
connected (bool): True if Ray has been started and False otherwise.
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actor_init_error = None
self.actors = {}
# Information used to maintain actor checkpoints.
self.actor_checkpoint_info = {}
self.actor_task_counter = 0
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
# Functions to run to process the values returned by ray.get. Each
# postprocessor must take two arguments ("object_ids", and "values").
self._post_get_hooks = []
@property
def connected(self):
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self.node.load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
def mark_actor_init_failed(self, error):
"""Called to mark this actor as failed during initialization."""
self.actor_init_error = error
def reraise_actor_init_error(self):
"""Raises any previous actor initialization error."""
if self.actor_init_error is not None:
raise self.actor_init_error
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
# called by`register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RayConnectionError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def put_object(self, value, object_id=None, pin_object=True):
"""Put value in the local object store with object id `objectid`.
This assumes that the value for `objectid` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_id (object_id.ObjectID): The object ID of the value to be
put. If None, one will be generated.
pin_object: If set, the object will be pinned at the raylet.
Returns:
object_id.ObjectID: The object ID the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ID.
if isinstance(value, ObjectID):
raise TypeError(
"Calling 'put' on an ray.ObjectID is not allowed "
"(similarly, returning an ray.ObjectID from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectID in a list and "
"call 'put' on it (or return it).")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectID because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectID(
self.core_worker.put_serialized_object(
serialized_value, object_id=object_id, pin_object=pin_object))
def deserialize_objects(self, data_metadata_pairs, object_ids):
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs, object_ids)
def get_objects(self, object_ids, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_ids. This will
block until all the values for object_ids have been written to the
local object store.
Args:
object_ids (List[object_id.ObjectID]): A list of the object IDs
whose values should be retrieved.
            timeout (float): The maximum amount of time in
seconds to wait before returning.
Raises:
Exception if running in LOCAL_MODE and any of the object IDs do not
exist in the emulated object store.
"""
# Make sure that the values are object IDs.
for object_id in object_ids:
if not isinstance(object_id, ObjectID):
raise TypeError(
"Attempting to call `get` on the value {}, "
"which is not an ray.ObjectID.".format(object_id))
if self.mode == LOCAL_MODE:
return self.local_mode_manager.get_objects(object_ids)
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_ids, self.current_task_id, timeout_ms)
return self.deserialize_objects(data_metadata_pairs, object_ids)
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hmset(
key, {
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers)
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hmset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
def _get_arguments_for_execution(self, function_name, serialized_args):
"""Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Arguments that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ray.ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayError: This exception is raised if a task that
created one of the arguments failed.
"""
arguments = [None] * len(serialized_args)
object_ids = []
object_indices = []
for (i, arg) in enumerate(serialized_args):
if isinstance(arg, ObjectID):
object_ids.append(arg)
object_indices.append(i)
else:
# pass the argument by value
arguments[i] = arg
# Get the objects from the local object store.
if len(object_ids) > 0:
values = self.get_objects(object_ids)
for i, value in enumerate(values):
if isinstance(value, RayError):
raise value
else:
arguments[object_indices[i]] = value
return ray.signature.recover_args(arguments)
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
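# Illustrative sketch (not part of the Ray source): a typical driver-side use
# of Worker.run_function_on_all_workers, e.g. to extend sys.path on every
# worker process. The path below is hypothetical.
def _example_setup_all_workers(worker):
    def add_local_path(worker_info):
        import sys
        sys.path.insert(0, "/tmp/my_project")

    worker.run_function_on_all_workers(add_local_path)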
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
if _mode() == LOCAL_MODE:
raise RuntimeError("ray.get_gpu_ids() currently does not work in "
"local_mode.")
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = [
resource_id for resource_id, _ in all_resource_ids.get("GPU", [])
]
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
return assigned_ids
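# Illustrative sketch (not part of the Ray source): how the
# CUDA_VISIBLE_DEVICES remapping above behaves. The values are hypothetical.
def _gpu_id_remap_example():
    original_gpu_ids = ["2", "5"]  # CUDA_VISIBLE_DEVICES=2,5 when the worker started
    assigned_ids = [1]             # the raylet assigned the second visible GPU
    # get_gpu_ids() reports the ID as it appears in CUDA_VISIBLE_DEVICES.
    return [original_gpu_ids[i] for i in assigned_ids]  # -> ["5"]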
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
if _mode() == LOCAL_MODE:
raise RuntimeError("ray.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
def get_webui_url():
"""Get the URL to access the web UI.
Note that the URL does not specify which node the web UI is on.
Returns:
The URL of the web UI as a string.
"""
if _global_node is None:
raise RuntimeError("Ray has not been initialized/connected.")
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
def print_failed_task(task_status):
"""Print information about failed tasks.
Args:
task_status (Dict): A dictionary containing the name, operationid, and
error message for a failed task.
"""
logger.error("""
Error: Task failed
Function Name: {}
Task ID: {}
Error Message: \n{}
""".format(task_status["function_name"], task_status["operationid"],
task_status["error_message"]))
def init(address=None,
redis_address=None,
redis_port=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
driver_object_store_memory=None,
redis_max_memory=None,
log_to_driver=True,
node_ip_address=ray_constants.NODE_DEFAULT_IP,
object_id_seed=None,
local_mode=False,
redirect_worker_output=None,
redirect_output=None,
ignore_reinit_error=False,
num_redis_shards=None,
redis_max_clients=None,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
plasma_directory=None,
huge_pages=False,
include_java=False,
include_webui=None,
webui_host="localhost",
job_id=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
plasma_store_socket_name=None,
raylet_socket_name=None,
temp_dir=None,
load_code_from_local=False,
use_pickle=True,
_internal_config=None,
lru_evict=False):
"""Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
redis_address (str): Deprecated; same as address.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then a random port will be chosen.
num_cpus (int): Number of cpus the user wishes all raylets to
be configured with.
num_gpus (int): Number of gpus the user wishes all raylets to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
memory: The amount of memory (in bytes) that is available for use by
workers requesting memory resources. By default, this is autoset
based on available system memory.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is autoset based on available
system memory, subject to a 20GB cap.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
autoset based on available system memory, subject to a 10GB cap.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same driver in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different
drivers.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
driver_object_store_memory (int): Limit the amount of memory the driver
can use in the object store for creating objects. By default, this
is autoset based on available system memory, subject to a 20GB cap.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_java: Boolean flag indicating whether to enable java worker.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster. If this argument
is None, then the UI will be started if the relevant dependencies
are present.
webui_host: The host to bind the web UI server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
job_id: The ID of this job.
configure_logging: True if logging should be configured here.
Otherwise, the user may want to configure it separately.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
load_code_from_local: Whether code should be loaded from a local module
or from the GCS.
use_pickle: Deprecated.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.UnreconstructableError may be thrown. If False, then
reference counting will be used to decide which objects are safe to
evict and when under memory pressure, ray.ObjectStoreFullError may
be thrown.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
if not use_pickle:
raise DeprecationWarning("The use_pickle argument is deprecated.")
if redis_address is not None:
raise DeprecationWarning("The redis_address argument is deprecated. "
"Please use address instead.")
if redis_address is not None or address is not None:
redis_address, _, _ = services.validate_redis_address(
address, redis_address)
if configure_logging:
setup_logger(logging_level, logging_format)
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been "
"called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
_internal_config = (json.loads(_internal_config)
if _internal_config else {})
# Set the internal config options for LRU eviction.
if lru_evict:
# Turn off object pinning.
if _internal_config.get("object_pinning_enabled", False):
raise Exception(
"Object pinning cannot be enabled if using LRU eviction.")
_internal_config["object_pinning_enabled"] = False
_internal_config["object_store_full_max_retries"] = -1
_internal_config["free_objects_period_milliseconds"] = 1000
global _global_node
if driver_mode == LOCAL_MODE:
# If starting Ray in LOCAL_MODE, don't start any other processes.
_global_node = ray.node.LocalNode()
elif redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
redis_port=redis_port,
node_ip_address=node_ip_address,
object_id_seed=object_id_seed,
local_mode=local_mode,
driver_mode=driver_mode,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
redis_password=redis_password,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
include_java=include_java,
include_webui=include_webui,
webui_host=webui_host,
memory=memory,
object_store_memory=object_store_memory,
redis_max_memory=redis_max_memory,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local,
_internal_config=_internal_config,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if num_redis_shards is not None:
raise ValueError("When connecting to an existing cluster, "
"num_redis_shards must not be provided.")
if redis_max_clients is not None:
raise ValueError("When connecting to an existing cluster, "
"redis_max_clients must not be provided.")
if memory is not None:
raise ValueError("When connecting to an existing cluster, "
"memory must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if redis_max_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"redis_max_memory must not be provided.")
if plasma_directory is not None:
raise ValueError("When connecting to an existing cluster, "
"plasma_directory must not be provided.")
if huge_pages:
raise ValueError("When connecting to an existing cluster, "
"huge_pages must not be provided.")
if temp_dir is not None:
raise ValueError("When connecting to an existing cluster, "
"temp_dir must not be provided.")
if plasma_store_socket_name is not None:
raise ValueError("When connecting to an existing cluster, "
"plasma_store_socket_name must not be provided.")
if raylet_socket_name is not None:
raise ValueError("When connecting to an existing cluster, "
"raylet_socket_name must not be provided.")
if _internal_config is not None:
logger.warning(
"When connecting to an existing cluster, "
"_internal_config must match the cluster's _internal_config.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
redis_address=redis_address,
redis_password=redis_password,
object_id_seed=object_id_seed,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local,
_internal_config=_internal_config)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=driver_object_store_memory,
job_id=job_id,
internal_config=_internal_config)
for hook in _post_init_hooks:
hook()
return _global_node.address_info
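# Illustrative sketch (not part of the original module): a hedged example of
# the two code paths documented above, starting a local cluster with explicit
# resources and attaching to an existing one. Wrapped in a helper that is
# never called here, so nothing runs at import time; the cluster address is a
# placeholder taken from the docstring.
def _example_init_usage():  # pragma: no cover - illustrative only
    import ray

    # Start a new local cluster with bounded resources.
    ray.init(num_cpus=2, ignore_reinit_error=True)
    ray.shutdown()

    # Attach to an already running cluster instead (placeholder address).
    ray.init(address="123.45.67.89:6379")
    ray.shutdown()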
# Functions to run as callbacks after a successful ray init.
_post_init_hooks = []
def shutdown(exiting_interpreter=False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(exiting_interpreter)
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "core_worker"):
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
global_worker._post_get_hooks = []
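# Illustrative sketch (not part of the original module): the "cleanup state
# between tests" pattern mentioned in the shutdown() docstring. The helper is
# never called here; it only shows the intended call order.
def _example_shutdown_between_tests():  # pragma: no cover - illustrative only
    import ray
    for _ in range(2):
        ray.init(num_cpus=1)
        # ... run one test workload against a fresh cluster ...
        ray.shutdown()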
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signal.SIGTERM)
try:
ray.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to redis.
if global_worker.mode == SCRIPT_MODE:
error_message = "".join(traceback.format_tb(tb))
try:
global_worker.redis_client.hmset(
b"Drivers:" + global_worker.worker_id,
{"exception": error_message})
except (ConnectionRefusedError, redis.exceptions.ConnectionError):
logger.warning("Could not push exception to redis.")
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
# The last time we raised a TaskError in this process. We use this value to
# suppress redundant error messages pushed from the workers.
last_task_error_raise_time = 0
# The max amount of seconds to wait before printing out an uncaught error.
UNCAUGHT_ERROR_GRACE_PERIOD = 5
def print_logs(redis_client, threads_stopped):
"""Prints log messages from workers on all of the nodes.
Args:
redis_client: A client to the primary Redis shard.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)
pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have been
# received with no break in between. If this number grows continually,
# then the worker is probably not able to process the log messages as
# rapidly as they are coming in.
num_consecutive_messages_received = 0
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
data = json.loads(ray.utils.decode(msg["data"]))
def color_for(data):
if data["pid"] == "raylet":
return colorama.Fore.YELLOW
else:
return colorama.Fore.CYAN
if data["ip"] == localhost:
for line in data["lines"]:
print("{}{}(pid={}){} {}".format(
colorama.Style.DIM, color_for(data), data["pid"],
colorama.Style.RESET_ALL, line))
else:
for line in data["lines"]:
print("{}{}(pid={}, ip={}){} {}".format(
colorama.Style.DIM, color_for(data), data["pid"],
data["ip"], colorama.Style.RESET_ALL, line))
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding logs "
"to the driver, use 'ray.init(log_to_driver=False)'.")
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error("print_logs: {}".format(e))
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
def print_error_messages_raylet(task_error_queue, threads_stopped):
"""Prints message received in the given output queue.
This checks periodically if any un-raised errors occurred in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
try:
error, t = task_error_queue.get(block=False)
except queue.Empty:
threads_stopped.wait(timeout=0.01)
continue
# Delay errors a little bit of time to attempt to suppress redundant
# messages originating from the worker.
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
threads_stopped.wait(timeout=1)
if threads_stopped.is_set():
break
if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
logger.debug("Suppressing error from worker: {}".format(error))
else:
logger.error(
"Possible unhandled error from worker: {}".format(error))
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
task_error_queue (queue.Queue): A queue used to communicate with the
thread that prints the errors found by this thread.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = str(
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB")).encode("ascii")
worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
# worker.error_message_pubsub_client.psubscribe("*")
try:
# Get the errors that occurred before the call to subscribe.
error_messages = ray.errors()
for error_message in error_messages:
logger.error(error_message)
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
gcs_entry = ray.gcs_utils.GcsEntry.FromString(msg["data"])
assert len(gcs_entry.entries) == 1
error_data = ray.gcs_utils.ErrorTableData.FromString(
gcs_entry.entries[0])
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary()
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# Delay it a bit to see if we can suppress it
task_error_queue.put((error_message, time.time()))
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error("listen_error_messages_raylet: {}".format(e))
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
def is_initialized():
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
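# Illustrative sketch (not part of the original module): is_initialized()
# simply reflects whether ray.init() has been called in this process.
def _example_is_initialized():  # pragma: no cover - illustrative only
    import ray
    assert not ray.is_initialized()
    ray.init()
    assert ray.is_initialized()
    ray.shutdown()
    assert not ray.is_initialized()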
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
internal_config=None):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Limit the amount of memory the driver can
use in the object store when creating objects.
job_id: The ID of the job. If it's None, then we will generate one.
internal_config: Dictionary of (str,str) containing internal config
options to override the defaults.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
ray._raylet.set_internal_config(internal_config)
if mode is not LOCAL_MODE:
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
# Initialize some fields.
if mode is WORKER_MODE:
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
# TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID`
worker.worker_id = _random_string()
setproctitle.setproctitle("ray::IDLE")
elif mode is LOCAL_MODE:
if job_id is None:
job_id = JobID.from_int(random.randint(1, 65535))
worker.worker_id = ray.utils.compute_driver_id_from_job(
job_id).binary()
else:
# This is the code path of driver mode.
if job_id is None:
# TODO(qwang): use `GcsClient::GenerateJobId()` here.
job_id = JobID.from_int(
int(worker.redis_client.incr("JobCounter")))
# When tasks are executed on remote workers in the context of multiple
# drivers, the current job ID is used to keep track of which job is
# responsible for the task so that error messages will be propagated to
# the correct driver.
worker.worker_id = ray.utils.compute_driver_id_from_job(
job_id).binary()
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# If running Ray in LOCAL_MODE, there is no need to call create_worker
# or to start the worker service.
if mode == LOCAL_MODE:
worker.local_mode_manager = LocalModeManager()
return
# For drivers, check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
# Create an object for interfacing with the global state.
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# Register the worker with Redis.
if mode == SCRIPT_MODE:
# The concept of a driver is the same as the concept of a "job".
# Register the driver/job with Redis here.
import __main__ as main
driver_info = {
"node_ip_address": node.node_ip_address,
"driver_id": worker.worker_id,
"start_time": time.time(),
"plasma_store_socket": node.plasma_store_socket_name,
"raylet_socket": node.raylet_socket_name,
"name": (main.__file__
if hasattr(main, "__file__") else "INTERACTIVE MODE")
}
worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
elif mode == WORKER_MODE:
# Register the worker with Redis.
worker_dict = {
"node_ip_address": node.node_ip_address,
"plasma_store_socket": node.plasma_store_socket_name,
}
# Check the RedirectOutput key in Redis and based on its value redirect
# worker output and error to their own files.
# This key is set in services.py when Redis is started.
redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
if (redirect_worker_output_val is not None
and int(redirect_worker_output_val) == 1):
log_stdout_file, log_stderr_file = (
node.new_worker_redirected_log_file(worker.worker_id))
# Redirect stdout/stderr at the file descriptor level. If we simply
# set sys.stdout and sys.stderr, then logging from C++ can fail to
# be redirected.
if log_stdout_file is not None:
os.dup2(log_stdout_file.fileno(), sys.stdout.fileno())
if log_stderr_file is not None:
os.dup2(log_stderr_file.fileno(), sys.stderr.fileno())
# We also manually set sys.stdout and sys.stderr because that seems
# to have an effect on the output buffering. Without doing this,
# stdout and stderr are heavily buffered resulting in seemingly
# lost logging statements.
if log_stdout_file is not None:
sys.stdout = log_stdout_file
if log_stderr_file is not None:
sys.stderr = log_stderr_file
# This should always be the first message to appear in the worker's
# stdout and stderr log files. The string "Ray worker pid:" is
# parsed in the log monitor process.
print("Ray worker pid: {}".format(os.getpid()))
print("Ray worker pid: {}".format(os.getpid()), file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
worker_dict["stdout_file"] = os.path.abspath(
(log_stdout_file
if log_stdout_file is not None else sys.stdout).name)
worker_dict["stderr_file"] = os.path.abspath(
(log_stderr_file
if log_stderr_file is not None else sys.stderr).name)
worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
else:
raise ValueError("Invalid worker mode. Expected DRIVER or WORKER.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
worker.core_worker = ray._raylet.CoreWorker(
(mode == SCRIPT_MODE),
node.plasma_store_socket_name,
node.raylet_socket_name,
job_id,
gcs_options,
node.get_logs_dir_path(),
node.node_ip_address,
node.node_manager_port,
)
if driver_object_store_memory is not None:
worker.core_worker.set_object_store_client_options(
"ray_driver_{}".format(os.getpid()), driver_object_store_memory)
# Put something in the plasma store so that subsequent plasma store
# accesses will be faster. Currently the first access is always slow, and
# we don't want the user to experience this.
temporary_object_id = ray.ObjectID.from_random()
worker.put_object(1, object_id=temporary_object_id)
ray.internal.free([temporary_object_id])
# Start the import thread
worker.import_thread = import_thread.ImportThread(worker, mode,
worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
q = queue.Queue()
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, q, worker.threads_stopped))
worker.printer_thread = threading.Thread(
target=print_error_messages_raylet,
name="ray_print_error_messages",
args=(q, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
worker.printer_thread.daemon = True
worker.printer_thread.start()
if log_to_driver:
worker.logger_thread = threading.Thread(
target=print_logs,
name="ray_print_logs",
args=(worker.redis_client, worker.threads_stopped))
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "printer_thread"):
worker.printer_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
ray.actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
setproctitle.setproctitle(title)
yield
setproctitle.setproctitle(next_title)
def register_custom_serializer(cls,
serializer,
deserializer,
use_pickle=False,
use_dict=False,
class_id=None):
"""Registers custom functions for efficient object serialization.
The serializer and deserializer are used when transferring objects of
`cls` across processes and nodes. This can be significantly faster than
the Ray default fallbacks. Wraps `register_custom_serializer` underneath.
Args:
cls (type): The class that ray should use this custom serializer for.
serializer: The custom serializer that takes in a cls instance and
outputs a serialized representation. use_pickle and use_dict
must be False if provided.
deserializer: The custom deserializer that takes in a serialized
representation of the cls and outputs a cls instance. use_pickle
and use_dict must be False if provided.
use_pickle: Deprecated.
use_dict: Deprecated.
class_id (str): Unique ID of the class. Autogenerated if None.
"""
worker = global_worker
worker.check_connected()
if use_pickle:
raise DeprecationWarning(
"`use_pickle` is no longer a valid parameter and will be removed "
"in future versions of Ray. If this breaks your application, "
"see `SerializationContext.register_custom_serializer`.")
if use_dict:
raise DeprecationWarning(
"`use_pickle` is no longer a valid parameter and will be removed "
"in future versions of Ray. If this breaks your application, "
"see `SerializationContext.register_custom_serializer`.")
assert serializer is not None and deserializer is not None
context = global_worker.get_serialization_context()
context.register_custom_serializer(
cls, serializer, deserializer, class_id=class_id)
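# Illustrative sketch (not part of the original module): registering a
# serializer/deserializer pair for a hypothetical class. Note that this must
# run after ray.init(), since register_custom_serializer() checks that the
# worker is connected.
def _example_register_custom_serializer():  # pragma: no cover - illustrative
    import ray

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    ray.init(ignore_reinit_error=True)
    # Serialize a Point to a plain tuple and rebuild it on deserialization.
    register_custom_serializer(
        Point,
        serializer=lambda p: (p.x, p.y),
        deserializer=lambda data: Point(*data))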
def show_in_webui(message, key="", dtype="text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
key (str): The key name for the message. Multiple messages under
different keys will be displayed at the same time. Messages
under the same key will be overridden.
dtype (str): The type of message for rendering. One of the
following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, "dtype accepts only: {}".format(
acceptable_dtypes)
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
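# Illustrative sketch (not part of the original module): reporting progress
# of a hypothetical long-running task to the dashboard. It assumes the web UI
# is running and that this function is re-exported as ray.show_in_webui.
def _example_show_in_webui():  # pragma: no cover - illustrative only
    import ray

    @ray.remote
    def long_running_job(n):
        for i in range(n):
            ray.show_in_webui("step {}/{}".format(i + 1, n), key="progress")
        return n

    ray.init(ignore_reinit_error=True)
    ray.get(long_running_job.remote(3))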
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
def get(object_ids, timeout=None):
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ID is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_ids is a list, then the objects
corresponding to each object in the list will be returned.
This method will issue a warning if it's running inside an async context.
In that case, use ``await object_id`` instead of ``ray.get(object_id)``. For
a list of object ids, you can use ``await asyncio.gather(*object_ids)``.
Args:
object_ids: Object ID of the object to get or a list of object IDs to
get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
RayTimeoutError: A RayTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.debug("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object id with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_ids, ray.ObjectID)
if is_individual_id:
object_ids = [object_ids]
if not isinstance(object_ids, list):
raise ValueError("'object_ids' must either be an object ID "
"or a list of object IDs.")
global last_task_error_raise_time
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values = worker.get_objects(object_ids, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
last_task_error_raise_time = time.time()
if isinstance(value, ray.exceptions.UnreconstructableError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
# Run post processors.
for post_processor in worker._post_get_hooks:
values = post_processor(object_ids, values)
if is_individual_id:
values = values[0]
return values
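# Illustrative sketch (not part of the original module): the blocking and
# timeout behaviour described in the get() docstring. The exception name is
# taken from that docstring.
def _example_get():  # pragma: no cover - illustrative only
    import time
    import ray

    @ray.remote
    def slow_square(x):
        time.sleep(0.1)
        return x * x

    ray.init(ignore_reinit_error=True)
    ids = [slow_square.remote(i) for i in range(3)]
    print(ray.get(ids))  # blocks until all three results are ready
    try:
        ray.get(slow_square.remote(4), timeout=0.001)
    except ray.exceptions.RayTimeoutError:
        print("result was not ready within the timeout")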
def put(value, weakref=False):
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
weakref: If set, allows the object to be evicted while a reference
to the returned ID exists. You might want to set this if putting
a lot of objects that you might not need in the future.
Returns:
The object ID assigned to this value.
"""
worker = global_worker
worker.check_connected()
with profiling.profile("ray.put"):
if worker.mode == LOCAL_MODE:
object_id = worker.local_mode_manager.put_object(value)
else:
try:
object_id = worker.put_object(value, pin_object=not weakref)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_id
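# Illustrative sketch (not part of the original module): putting a large value
# into the object store once and passing the resulting ID to tasks, so the
# value is not re-serialized for every call.
def _example_put():  # pragma: no cover - illustrative only
    import ray

    @ray.remote
    def total(values):
        return sum(values)

    ray.init(ignore_reinit_error=True)
    big_list_id = ray.put(list(range(10000)))  # stored once
    print(ray.get(total.remote(big_list_id)))  # workers receive it by reference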
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
def wait(object_ids, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object IDs.
This method returns two lists. The first list consists of object IDs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object IDs (which may or may not be
ready).
Ordering of the input list of object IDs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_ids)``, you can use
``await asyncio.wait(object_ids)``.
Args:
object_ids (List[ObjectID]): List of object IDs for objects that may or
may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs.
"""
worker = global_worker
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object id with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_ids, ObjectID):
raise TypeError(
"wait() expected a list of ray.ObjectID, got a single ray.ObjectID"
)
if not isinstance(object_ids, list):
raise TypeError(
"wait() expected a list of ray.ObjectID, got {}".format(
type(object_ids)))
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
"Received {}".format(timeout))
for object_id in object_ids:
if not isinstance(object_id, ObjectID):
raise TypeError("wait() expected a list of ray.ObjectID, "
"got list containing {}".format(type(object_id)))
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# When Ray is run in LOCAL_MODE, all functions are run immediately,
# so all objects in object_id are ready.
if worker.mode == LOCAL_MODE:
return object_ids[:num_returns], object_ids[num_returns:]
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_ids) == 0:
return [], []
if len(object_ids) != len(set(object_ids)):
raise ValueError("Wait requires a list of unique object IDs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_ids):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_ids,
num_returns,
timeout_milliseconds,
worker.current_task_id,
)
return ready_ids, remaining_ids
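# Illustrative sketch (not part of the original module): the common pattern of
# processing results as they become available, using the ready/remaining split
# described in the wait() docstring.
def _example_wait_loop():  # pragma: no cover - illustrative only
    import ray

    @ray.remote
    def work(x):
        return x * x

    ray.init(ignore_reinit_error=True)
    remaining = [work.remote(i) for i in range(5)]
    while remaining:
        ready, remaining = ray.wait(remaining, num_returns=1)
        print(ray.get(ready[0]))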
def kill(actor):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. Any atexit handlers installed in the actor will still be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task.
If the actor is reconstructable, an attempt will be made to reconstruct it.
Args:
actor (ActorHandle): Handle to the actor to kill.
"""
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker = ray.worker.global_worker
worker.check_connected()
worker.core_worker.kill_actor(actor._ray_actor_id, False)
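# Illustrative sketch (not part of the original module): the difference between
# the forceful ray.kill() and the graceful __ray_terminate__ path mentioned in
# the kill() docstring. The actor class is hypothetical.
def _example_kill_actor():  # pragma: no cover - illustrative only
    import ray

    @ray.remote
    class Counter(object):
        def increment(self):
            return 1

    ray.init(ignore_reinit_error=True)
    graceful = Counter.remote()
    graceful.__ray_terminate__.remote()  # queued; pending tasks finish first

    forceful = Counter.remote()
    ray.kill(forceful)  # interrupts running tasks immediately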
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to serialize
remote functions, we don't attempt to serialize the worker object, which
cannot be serialized.
"""
return worker.mode
def make_decorator(num_return_vals=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
max_calls=None,
max_retries=None,
max_reconstructions=None,
worker=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_reconstructions is not None:
raise ValueError("The keyword 'max_reconstructions' is not "
"allowed for remote functions.")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, num_return_vals,
max_calls, max_retries)
if inspect.isclass(function_or_class):
if num_return_vals is not None:
raise TypeError("The keyword 'num_return_vals' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
memory, object_store_memory, resources,
max_reconstructions)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
def remote(*args, **kwargs):
"""Define a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments:
* **num_return_vals:** This is only for *remote functions*. It specifies
the number of object IDs returned by the remote function invocation.
* **num_cpus:** The quantity of CPU cores to reserve for this task or for
the lifetime of the actor.
* **num_gpus:** The quantity of GPUs to reserve for this task or for the
lifetime of the actor.
* **resources:** The quantity of various custom resources to reserve for
this task or for the lifetime of the actor. This is a dictionary mapping
strings (resource names) to numbers.
* **max_calls:** Only for *remote functions*. This specifies the maximum
number of times that a given worker can execute the given remote function
before it must exit (this can be used to address memory leaks in
third-party libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow). By
default this is infinite.
* **max_reconstructions**: Only for *actors*. This specifies the maximum
number of times that the actor should be reconstructed when it dies
unexpectedly. The minimum valid value is 0 (the default), which indicates
that the actor doesn't need to be reconstructed. The maximum valid
value is ray.ray_constants.INFINITE_RECONSTRUCTION.
* **max_retries**: Only for *remote functions*. This specifies the maximum
number of times that the remote function should be rerun when the worker
process executing it crashes unexpectedly. The minimum valid value is 0,
the default value is 4, and the maximum valid value is
ray.ray_constants.INFINITE_RECONSTRUCTION.
This can be done as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
"'memory', 'object_store_memory', 'resources', "
"'max_calls', or 'max_reconstructions', like "
"'@ray.remote(num_return_vals=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_return_vals",
"num_cpus",
"num_gpus",
"memory",
"object_store_memory",
"resources",
"max_calls",
"max_reconstructions",
"max_retries",
], error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
"dictionary, but received type {}.".format(
type(resources)))
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
# Handle other arguments.
num_return_vals = kwargs.get("num_return_vals")
max_calls = kwargs.get("max_calls")
max_reconstructions = kwargs.get("max_reconstructions")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
return make_decorator(
num_return_vals=num_return_vals,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
max_calls=max_calls,
max_reconstructions=max_reconstructions,
max_retries=max_retries,
worker=worker)
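# Illustrative sketch (not part of the original module): combining the keyword
# form of the decorator with .options(), which the remote() docstring says
# accepts the same arguments.
def _example_remote_options():  # pragma: no cover - illustrative only
    import ray

    @ray.remote(num_cpus=1, num_return_vals=2, max_retries=2)
    def split(x):
        return x // 2, x % 2

    ray.init(ignore_reinit_error=True)
    half_id, rem_id = split.remote(7)
    print(ray.get([half_id, rem_id]))  # [3, 1]

    # Re-parameterize the same function without redefining it.
    cheap_split = split.options(num_cpus=0.5)
    print(ray.get(list(cheap_split.remote(9))))  # [4, 1]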
avg_line_length: 41.225307 | max_line_length: 82 | alphanum_fraction: 0.64753

hexsha: a3d8871f95f40846374d0ae5e93e2afa9506c8e1 | size: 934 | ext: py | lang: Python
max_stars_repo_path: solutions/Count Submatrices With All Ones/solution.py
max_stars_repo_name: nilax97/leetcode-solutions
max_stars_repo_head_hexsha: d3c12f2b289662d199510e0431e177bbf3cda121
max_stars_repo_licenses: ["MIT"] | max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2021-06-06T22:03:15.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-08T08:49:04.000Z
max_issues_repo_path: solutions/Count Submatrices With All Ones/solution.py
max_issues_repo_name: nilax97/leetcode-solutions
max_issues_repo_head_hexsha: d3c12f2b289662d199510e0431e177bbf3cda121
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: solutions/Count Submatrices With All Ones/solution.py
max_forks_repo_name: nilax97/leetcode-solutions
max_forks_repo_head_hexsha: d3c12f2b289662d199510e0431e177bbf3cda121
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from typing import List

class Solution:
def numSubmat(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
#convert mat in place into a histogram of column heights
for i in range(m):
for j in range(n):
if mat[i][j] and i > 0:
mat[i][j] += mat[i-1][j] #histogram
ans = 0
for i in range(m):
stack = [] #mono-stack of indices of non-decreasing height
cnt = 0
for j in range(n):
while stack and mat[i][stack[-1]] > mat[i][j]:
jj = stack.pop() #start
kk = stack[-1] if stack else -1 #end
cnt -= (mat[i][jj] - mat[i][j])*(jj - kk) #adjust to reflect lower height
cnt += mat[i][j] #count submatrices bottom-right at (i, j)
ans += cnt
stack.append(j)
return ans
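# A hedged usage sketch (not part of the original solution): for a 2 x 2
# all-ones matrix every submatrix qualifies: 4 of size 1x1, 2 of 1x2,
# 2 of 2x1 and 1 of 2x2, i.e. 9 in total.
def _example_num_submat():
    assert Solution().numSubmat([[1, 1], [1, 1]]) == 9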
avg_line_length: 35.923077 | max_line_length: 93 | alphanum_fraction: 0.421842

hexsha: 01f00b261e337f01f29d2e77a2788add9814d4f3 | size: 169,330 | ext: py | lang: Python
max_stars_repo_path: tests/test__mongomock.py
max_stars_repo_name: andriyor/mongomock
max_stars_repo_head_hexsha: 64cccf51bfb3e15eb65f380bbc65a59d218cf6d3
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test__mongomock.py
max_issues_repo_name: andriyor/mongomock
max_issues_repo_head_hexsha: 64cccf51bfb3e15eb65f380bbc65a59d218cf6d3
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test__mongomock.py
max_forks_repo_name: andriyor/mongomock
max_forks_repo_head_hexsha: 64cccf51bfb3e15eb65f380bbc65a59d218cf6d3
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
from collections import OrderedDict
import copy
import datetime
import os
from packaging import version
import re
import sys
import time
from unittest import TestCase, skipIf, skipUnless
import uuid
import mongomock
from mongomock import ConfigurationError
from mongomock import Database
from mongomock import helpers
from mongomock import InvalidURI
from mongomock import OperationFailure
try:
from bson import DBRef, decimal128
from bson.objectid import ObjectId
import pymongo
from pymongo import MongoClient as PymongoClient
from pymongo.read_preferences import ReadPreference
except ImportError:
from mongomock.object_id import ObjectId
from tests.utils import DBRef
try:
from bson.code import Code
from bson.regex import Regex
from bson.son import SON
import execjs # noqa pylint: disable=unused-import
_HAVE_MAP_REDUCE = any(r.is_available() for r in execjs.runtimes().values())
except ImportError:
_HAVE_MAP_REDUCE = False
Code = str
from tests.multicollection import MultiCollection
SERVER_VERSION = version.parse(mongomock.SERVER_VERSION)
class InterfaceTest(TestCase):
def test__can_create_db_without_path(self):
self.assertIsNotNone(mongomock.MongoClient())
def test__can_create_db_with_path(self):
self.assertIsNotNone(mongomock.MongoClient('mongodb://localhost'))
def test__can_create_db_with_multiple_pathes(self):
hostnames = ['mongodb://localhost:27017', 'mongodb://localhost:27018']
self.assertIsNotNone(mongomock.MongoClient(hostnames))
def test__repr(self):
self.assertEqual(repr(mongomock.MongoClient()),
"mongomock.MongoClient('localhost', 27017)")
def test__bad_uri_raises(self):
with self.assertRaises(InvalidURI):
mongomock.MongoClient('http://host1')
with self.assertRaises(InvalidURI):
mongomock.MongoClient('://host1')
with self.assertRaises(InvalidURI):
mongomock.MongoClient('mongodb://')
with self.assertRaises(InvalidURI):
mongomock.MongoClient('mongodb://localhost/path/mongodb.sock')
with self.assertRaises(InvalidURI):
mongomock.MongoClient('mongodb://localhost?option')
with self.assertRaises(ValueError):
mongomock.MongoClient('mongodb:host2')
def test__none_uri_host(self):
self.assertIsNotNone(mongomock.MongoClient('host1'))
self.assertIsNotNone(mongomock.MongoClient('//host2'))
self.assertIsNotNone(mongomock.MongoClient('mongodb:12'))
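# Illustrative sketch (not part of the original test suite): the basic
# in-memory workflow the interface tests above exercise, using the module's
# existing `import mongomock`. No MongoDB server is required.
def _example_basic_usage():  # pragma: no cover - illustrative only
    client = mongomock.MongoClient()
    collection = client.some_db.some_collection
    collection.insert_one({'name': 'ada', 'score': 3})
    assert collection.count_documents({'name': 'ada'}) == 1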
class DatabaseGettingTest(TestCase):
def setUp(self):
super(DatabaseGettingTest, self).setUp()
self.client = mongomock.MongoClient()
def test__getting_database_via_getattr(self):
db1 = self.client.some_database_here
db2 = self.client.some_database_here
self.assertIs(db1, db2)
self.assertIs(db1, self.client['some_database_here'])
self.assertIsInstance(db1, Database)
self.assertIs(db1.client, self.client)
self.assertIs(db2.client, self.client)
def test__getting_database_via_getitem(self):
db1 = self.client['some_database_here']
db2 = self.client['some_database_here']
self.assertIs(db1, db2)
self.assertIs(db1, self.client.some_database_here)
self.assertIsInstance(db1, Database)
def test__drop_database(self):
db = self.client.a
collection = db.a
doc_id = collection.insert_one({'aa': 'bb'}).inserted_id
self.assertEqual(collection.count_documents({'_id': doc_id}), 1)
self.client.drop_database('a')
self.assertEqual(collection.count_documents({'_id': doc_id}), 0)
db = self.client.a
collection = db.a
doc_id = collection.insert_one({'aa': 'bb'}).inserted_id
self.assertEqual(collection.count_documents({'_id': doc_id}), 1)
self.client.drop_database(db)
self.assertEqual(collection.count_documents({'_id': doc_id}), 0)
def test__drop_database_indexes(self):
db = self.client.somedb
collection = db.a
collection.create_index('simple')
collection.create_index([('value', 1)], unique=True)
collection.create_index([('sparsed', 1)], unique=True, sparse=True)
self.client.drop_database('somedb')
# Make sure indexes' rules no longer apply
collection.insert_one({'value': 'not_unique_but_ok', 'sparsed': 'not_unique_but_ok'})
collection.insert_one({'value': 'not_unique_but_ok'})
collection.insert_one({'sparsed': 'not_unique_but_ok'})
self.assertEqual(collection.count_documents({}), 3)
def test__sparse_unique_index(self):
db = self.client.somedb
collection = db.a
collection.create_index([('value', 1)], unique=True, sparse=True)
collection.insert_one({'value': 'should_be_unique'})
collection.insert_one({'simple': 'simple_without_value'})
collection.insert_one({'simple': 'simple_without_value2'})
collection.create_index([('value', 1)], unique=True, sparse=True)
def test__alive(self):
self.assertTrue(self.client.alive())
def test__dereference(self):
db = self.client.a
collection = db.a
to_insert = {'_id': 'a', 'aa': 'bb'}
collection.insert_one(to_insert)
a = db.dereference(DBRef('a', 'a', db.name))
self.assertEqual(to_insert, a)
def test__getting_default_database_valid(self):
def gddb(uri):
client = mongomock.MongoClient(uri)
return client, client.get_default_database()
c, db = gddb('mongodb://host1/foo')
self.assertIsNotNone(db)
self.assertIsInstance(db, Database)
self.assertIs(db.client, c)
self.assertIs(db, c['foo'])
c, db = gddb('mongodb://host1/bar')
self.assertIs(db, c['bar'])
c, db = gddb(r'mongodb://a%00lice:f%00oo@127.0.0.1/t%00est')
self.assertIs(db, c['t\x00est'])
c, db = gddb('mongodb://bob:bar@[::1]:27018/admin')
self.assertIs(db, c['admin'])
c, db = gddb('mongodb://%24am:f%3Azzb%40zz@127.0.0.1/'
'admin%3F?authMechanism=MONGODB-CR')
self.assertIs(db, c['admin?'])
c, db = gddb(['mongodb://localhost:27017/foo', 'mongodb://localhost:27018/foo'])
self.assertIs(db, c['foo'])
# As of pymongo 3.5, get_database() is equivalent to
# the old behavior of get_default_database()
client = mongomock.MongoClient('mongodb://host1/foo')
self.assertIs(client.get_database(), client['foo'])
def test__getting_default_database_invalid(self):
def client(uri):
return mongomock.MongoClient(uri)
c = client('mongodb://host1')
with self.assertRaises(ConfigurationError):
c.get_default_database()
c = client('host1')
with self.assertRaises(ConfigurationError):
c.get_default_database()
c = client('')
with self.assertRaises(ConfigurationError):
c.get_default_database()
c = client('mongodb://host1/')
with self.assertRaises(ConfigurationError):
c.get_default_database()
def test__getting_default_database_with_default_parameter(self):
c = mongomock.MongoClient('mongodb://host1/')
self.assertIs(c.get_default_database('foo'), c['foo'])
self.assertIs(c.get_default_database(default='foo'), c['foo'])
def test__getting_default_database_ignoring_default_parameter(self):
c = mongomock.MongoClient('mongodb://host1/bar')
self.assertIs(c.get_default_database('foo'), c['bar'])
self.assertIs(c.get_default_database(default='foo'), c['bar'])
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
def test__getting_default_database_preserves_options(self):
client = mongomock.MongoClient('mongodb://host1/foo')
db = client.get_database(read_preference=ReadPreference.NEAREST)
self.assertEqual(db.name, 'foo')
self.assertEqual(ReadPreference.NEAREST, db.read_preference)
self.assertEqual(ReadPreference.PRIMARY, client.read_preference)
class UTCPlus2(datetime.tzinfo):
def fromutc(self, dt):
return dt + self.utcoffset(dt)
def tzname(self, dt):
return '<dummy UTC+2>'
def utcoffset(self, dt):
return datetime.timedelta(hours=2)
def dst(self, dt):
return datetime.timedelta()
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
@skipIf(os.getenv('NO_LOCAL_MONGO'), 'No local Mongo server running')
class _CollectionComparisonTest(TestCase):
"""Compares a fake collection with the real mongo collection implementation
This is done via cross-comparison of the results.
"""
def setUp(self):
super(_CollectionComparisonTest, self).setUp()
self.fake_conn = mongomock.MongoClient()
self.mongo_conn = self._connect_to_local_mongodb()
self.db_name = 'mongomock___testing_db'
self.collection_name = 'mongomock___testing_collection'
self.mongo_conn.drop_database(self.db_name)
self.mongo_collection = self.mongo_conn[self.db_name][self.collection_name]
self.fake_collection = self.fake_conn[self.db_name][self.collection_name]
self.cmp = MultiCollection({
'fake': self.fake_collection,
'real': self.mongo_collection,
})
def _create_compare_for_collection(self, collection_name, db_name=None):
if not db_name:
db_name = self.db_name
mongo_collection = self.mongo_conn[db_name][collection_name]
fake_collection = self.fake_conn[db_name][collection_name]
return MultiCollection({
'fake': fake_collection,
'real': mongo_collection,
})
def _connect_to_local_mongodb(self, num_retries=60):
"""Performs retries on connection refused errors (for travis-ci builds)"""
for retry in range(num_retries):
if retry > 0:
time.sleep(0.5)
try:
return PymongoClient(
host=os.environ.get('TEST_MONGO_HOST', 'localhost'), maxPoolSize=1
)
except pymongo.errors.ConnectionFailure as e:
if retry == num_retries - 1:
raise
if 'connection refused' not in str(e).lower():
raise
def tearDown(self):
super(_CollectionComparisonTest, self).tearDown()
self.mongo_conn.close()
class EqualityCollectionTest(_CollectionComparisonTest):
def test__database_equality(self):
self.assertEqual(self.mongo_conn[self.db_name], self.mongo_conn[self.db_name])
self.assertEqual(self.fake_conn[self.db_name], self.fake_conn[self.db_name])
@skipIf(sys.version_info < (3,), 'Older versions of Python do not handle hashing the same way')
@skipIf(
helpers.PYMONGO_VERSION and helpers.PYMONGO_VERSION < version.parse('3.12'),
"older versions of pymongo didn't have proper hashing")
def test__database_hashable(self):
{self.mongo_conn[self.db_name]} # pylint: disable=pointless-statement
{self.fake_conn[self.db_name]} # pylint: disable=pointless-statement
@skipIf(sys.version_info < (3,), 'Older versions of Python do not handle hashing the same way')
@skipUnless(
helpers.PYMONGO_VERSION and helpers.PYMONGO_VERSION < version.parse('3.12'),
"older versions of pymongo didn't have proper hashing")
def test__database_not_hashable(self):
with self.assertRaises(TypeError):
{self.mongo_conn[self.db_name]} # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
{self.fake_conn[self.db_name]} # pylint: disable=pointless-statement
class MongoClientCollectionTest(_CollectionComparisonTest):
def test__find_is_empty(self):
self.cmp.do.delete_many({})
self.cmp.compare.find()
def test__inserting(self):
self.cmp.do.delete_many({})
data = {'a': 1, 'b': 2, 'c': 'data'}
self.cmp.do.insert_one(data)
self.cmp.compare.find() # single document, no need to ignore order
def test__bulk_insert(self):
objs = [{'a': 2, 'b': {'c': 3}}, {'c': 5}, {'d': 7}]
results_dict = self.cmp.do.insert_many(objs)
for results in results_dict.values():
self.assertEqual(len(results.inserted_ids), len(objs))
self.assertEqual(
len(set(results.inserted_ids)), len(results.inserted_ids),
'Returned object ids not unique!')
self.cmp.compare_ignore_order.find()
def test__insert(self):
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.insert({'a': 1})
return
self.cmp.do.insert({'a': 1})
self.cmp.compare.find()
def test__insert_one(self):
self.cmp.do.insert_one({'a': 1})
self.cmp.compare.find()
def test__insert_many(self):
self.cmp.do.insert_many([{'a': 1}, {'a': 2}])
self.cmp.compare.find()
def test__save(self):
# add an item with a non ObjectId _id first.
self.cmp.do.insert_one({'_id': 'b'})
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.save({'_id': ObjectId(), 'someProp': 1})
return
self.cmp.do.save({'_id': ObjectId(), 'someProp': 1})
self.cmp.compare_ignore_order.find()
def test__insert_object_id_as_dict(self):
self.cmp.do.delete_many({})
doc_ids = [
# simple top-level dictionary
{'A': 1},
# dict with value as list
{'A': [1, 2, 3]},
# dict with value as dict
{'A': {'sub': {'subsub': 3}}}
]
for doc_id in doc_ids:
_id = {
key: value.inserted_id
for key, value in self.cmp.do.insert_one({'_id': doc_id, 'a': 1}).items()
}
self.assertEqual(_id['fake'], _id['real'])
self.assertEqual(_id['fake'], doc_id)
self.assertEqual(_id['real'], doc_id)
self.assertEqual(type(_id['fake']), type(_id['real']))
self.cmp.compare.find({'_id': doc_id})
docs = self.cmp.compare.find_one({'_id': doc_id})
self.assertEqual(docs['fake']['_id'], doc_id)
self.assertEqual(docs['real']['_id'], doc_id)
self.cmp.do.delete_one({'_id': doc_id})
def test__count(self):
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.count()
return
self.cmp.compare.count()
self.cmp.do.insert_one({'a': 1})
self.cmp.compare.count()
self.cmp.do.insert_one({'a': 0})
self.cmp.compare.count()
self.cmp.compare.count({'a': 1})
@skipIf(
helpers.PYMONGO_VERSION and helpers.PYMONGO_VERSION < version.parse('3.8'),
'older version of pymongo does not have count_documents')
def test__count_documents(self):
self.cmp.compare.count_documents({})
self.cmp.do.insert_one({'a': 1})
self.cmp.compare.count_documents({})
self.cmp.do.insert_one({'a': 0})
self.cmp.compare.count_documents({})
self.cmp.compare.count_documents({'a': 1})
self.cmp.compare.count_documents({}, skip=10)
self.cmp.compare.count_documents({}, skip=0)
self.cmp.compare.count_documents({}, skip=10, limit=100)
self.cmp.compare.count_documents({}, skip=10, limit=3)
self.cmp.compare_exceptions.count_documents({}, limit='one')
self.cmp.compare_exceptions.count_documents({}, limit='1')
@skipIf(
helpers.PYMONGO_VERSION and helpers.PYMONGO_VERSION < version.parse('3.8'),
'older version of pymongo does not have estimated_document_count')
def test__estimated_document_count(self):
self.cmp.compare.estimated_document_count()
self.cmp.do.insert_one({'a': 1})
self.cmp.compare.estimated_document_count()
self.cmp.do.insert_one({'a': 0})
self.cmp.compare.estimated_document_count()
if SERVER_VERSION < version.parse('5'):
self.cmp.compare.estimated_document_count(skip=2)
else:
self.cmp.compare_exceptions.estimated_document_count(skip=2)
self.cmp.compare_exceptions.estimated_document_count(filter={'a': 1})
def test__reindex(self):
self.cmp.compare.create_index('a')
self.cmp.do.insert_one({'a': 1})
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.reindex()
return
self.cmp.do.reindex()
def test__find_one(self):
self.cmp.do.insert_one({'_id': 'id1', 'name': 'new'})
self.cmp.compare.find_one({'_id': 'id1'})
self.cmp.do.insert_one({'_id': 'id2', 'name': 'another new'})
self.cmp.compare.find_one({'_id': 'id2'}, {'_id': 1})
self.cmp.compare.find_one('id2', {'_id': 1})
def test__find_one_no_args(self):
self.cmp.do.insert_one({'_id': 'new_obj', 'field': 'value'})
self.cmp.compare.find_one()
def test__find_by_attributes(self):
id1 = ObjectId()
self.cmp.do.insert_one({'_id': id1, 'name': 'new'})
self.cmp.do.insert_one({'name': 'another new'})
self.cmp.compare_ignore_order.sort_by(lambda doc: str(doc.get('name', str(doc)))).find()
self.cmp.compare.find({'_id': id1})
def test__find_by_document(self):
self.cmp.do.insert_one({'name': 'new', 'doc': {'key': 'val'}})
self.cmp.do.insert_one({'name': 'another new'})
self.cmp.do.insert_one({'name': 'new', 'doc': {'key': ['val']}})
self.cmp.do.insert_one({'name': 'new', 'doc': {'key': ['val', 'other val']}})
self.cmp.compare_ignore_order.find()
self.cmp.compare.find({'doc': {'key': 'val'}})
self.cmp.compare.find({'doc': {'key': {'$eq': 'val'}}})
def test__find_by_empty_document(self):
self.cmp.do.insert_one({'doc': {'data': 'val'}})
self.cmp.do.insert_one({'doc': {}})
self.cmp.do.insert_one({'doc': None})
self.cmp.compare.find({'doc': {}})
def test__find_by_attributes_return_fields(self):
id1 = ObjectId()
id2 = ObjectId()
self.cmp.do.insert_one(
{'_id': id1, 'name': 'new', 'someOtherProp': 2, 'nestedProp': {'a': 1}})
self.cmp.do.insert_one({'_id': id2, 'name': 'another new'})
self.cmp.compare_ignore_order.find({}, {'_id': 0}) # test exclusion of _id
self.cmp.compare_ignore_order.find({}, {'_id': 1, 'someOtherProp': 1}) # test inclusion
self.cmp.compare_ignore_order.find({}, {'_id': 0, 'someOtherProp': 0}) # test exclusion
self.cmp.compare_ignore_order.find({}, {'_id': 0, 'someOtherProp': 1}) # test mixed _id:0
self.cmp.compare_ignore_order.find({}, {'someOtherProp': 0}) # test no _id, otherProp:0
self.cmp.compare_ignore_order.find({}, {'someOtherProp': 1}) # test no _id, otherProp:1
self.cmp.compare.find({'_id': id1}, {'_id': 0}) # test exclusion of _id
self.cmp.compare.find({'_id': id1}, {'_id': 1, 'someOtherProp': 1}) # test inclusion
self.cmp.compare.find({'_id': id1}, {'_id': 0, 'someOtherProp': 0}) # test exclusion
# test mixed _id:0
self.cmp.compare.find({'_id': id1}, {'_id': 0, 'someOtherProp': 1})
# test no _id, otherProp:0
self.cmp.compare.find({'_id': id1}, {'someOtherProp': 0})
# test no _id, otherProp:1
self.cmp.compare.find({'_id': id1}, {'someOtherProp': 1})
def test__find_by_attributes_return_fields_elemMatch(self):
id = ObjectId()
self.cmp.do.insert_one({
'_id': id,
'owns': [
{'type': 'hat', 'color': 'black'},
{'type': 'hat', 'color': 'green'},
{'type': 't-shirt', 'color': 'black', 'size': 'small'},
{'type': 't-shirt', 'color': 'black'},
{'type': 't-shirt', 'color': 'white'}
],
'hat': 'red'
})
elem = {'$elemMatch': {'type': 't-shirt', 'color': 'black'}}
# test filtering on array field only
self.cmp.compare.find({'_id': id}, {'owns': elem})
# test filtering on array field with inclusion
self.cmp.compare.find({'_id': id}, {'owns': elem, 'hat': 1})
# test filtering on array field with exclusion
self.cmp.compare.find({'_id': id}, {'owns': elem, 'hat': 0})
# test filtering on non array field
self.cmp.compare.find({'_id': id}, {'hat': elem})
# test no match
self.cmp.compare.find({'_id': id}, {'owns': {'$elemMatch': {'type': 'cap'}}})
def test__find_with_expr(self):
self.cmp.do.insert_many([
{'_id': 1, 'a': [5]},
{'_id': 2, 'a': [1, 2, 3]},
{'_id': 3, 'a': []},
])
self.cmp.compare.find({'$expr': {'$eq': [{'$size': ['$a']}, 1]}})
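        # Once a document without an 'a' field exists, $size inside $expr is
        # expected to error on both backends, hence the exception comparison below.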
self.cmp.do.insert_one({'_id': 4})
self.cmp.compare_exceptions.find({'$expr': {'$eq': [{'$size': ['$a']}, 1]}})
def test_double_negation(self):
self.cmp.do.insert_many([
{'_id': 1, 'a': 'some str'},
{'_id': 2, 'a': 'another str'},
{'_id': 3, 'a': []},
])
self.cmp.compare.find({'a': {'$not': {'$not': {'$regex': '^some'}}}})
def test__size(self):
id = ObjectId()
self.cmp.do.insert_one({
'_id': id,
'l_string': 1,
'l_tuple': ['a', 'b'],
'null_field': None
})
self.cmp.compare.find({'_id': id})
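        # $size only matches array fields of the exact length; scalars, nulls and
        # missing fields are never matched by it.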
self.cmp.compare.find({'_id': id, 'l_string': {'$not': {'$size': 0}}})
self.cmp.compare.find({'_id': id, 'l_tuple': {'$size': 2}})
self.cmp.compare.find({'_id': id, 'missing_field': {'$size': 1}})
self.cmp.compare.find({'_id': id, 'null_field': {'$size': 1}})
def test__all_with_other_operators(self):
objs = [{'list': ['a']}, {'list': ['a', 123]}, {'list': ['a', 123, 'xyz']}]
self.cmp.do.insert_many(objs)
self.cmp.compare.find({'list': {'$all': ['a'], '$size': 1}})
self.cmp.compare.find({'list': {'$all': ['a', 123], '$size': 2}})
self.cmp.compare.find({'list': {'$all': ['a', 123, 'xyz'], '$size': 3}})
self.cmp.compare.find({'list': {'$all': ['a'], '$size': 3}})
self.cmp.compare.find({'list': {'$all': ['a', 123], '$in': ['xyz']}})
self.cmp.compare.find({'list': {'$all': ['a', 123, 'xyz'], '$in': ['abcdef']}})
self.cmp.compare.find({'list': {'$all': ['a'], '$eq': ['a']}})
def test__regex_match_non_string(self):
id = ObjectId()
self.cmp.do.insert_one({
'_id': id,
'test': 1
})
self.cmp.compare.find({'_id': id, 'test': {'$regex': '1'}})
def test__regex_match_non_string_in_list(self):
id = ObjectId()
self.cmp.do.insert_one({
'_id': id,
'test': [3, 2, 1]
})
self.cmp.compare.find({'_id': id, 'test': {'$regex': '1'}})
def test__find_by_dotted_attributes(self):
"""Test seaching with dot notation."""
green_bowler = {
'name': 'bob',
'hat': {'color': 'green', 'type': 'bowler'}}
red_bowler = {
'name': 'sam',
'hat': {'color': 'red', 'type': 'bowler'}}
self.cmp.do.insert_one(green_bowler)
self.cmp.do.insert_one(red_bowler)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': 'sam'})
self.cmp.compare_ignore_order.find({'hat.color': 'green'})
self.cmp.compare_ignore_order.find({'hat.type': 'bowler'})
self.cmp.compare.find({
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({
'name': 'bob',
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({'hat': 'a hat'})
self.cmp.compare.find({'hat.color.cat': 'red'})
def test__find_empty_array_field(self):
# See #90
self.cmp.do.insert_one({'array_field': []})
self.cmp.compare.find({'array_field': []})
def test__find_non_empty_array_field(self):
# See #90
self.cmp.do.insert_one({'array_field': [['abc']]})
self.cmp.do.insert_one({'array_field': ['def']})
self.cmp.compare.find({'array_field': ['abc']})
self.cmp.compare.find({'array_field': [['abc']]})
self.cmp.compare.find({'array_field': 'def'})
self.cmp.compare.find({'array_field': ['def']})
def test__find_by_objectid_in_list(self):
# See #79
self.cmp.do.insert_one(
{'_id': 'x', 'rel_id': [ObjectId('52d669dcad547f059424f783')]})
self.cmp.compare.find({'rel_id': ObjectId('52d669dcad547f059424f783')})
def test__find_subselect_in_list(self):
# See #78
self.cmp.do.insert_one({'_id': 'some_id', 'a': [{'b': 1, 'c': 2}]})
self.cmp.compare.find_one({'a.b': 1})
def test__find_dict_in_nested_list(self):
# See #539
self.cmp.do.insert_one({'a': {'b': [{'c': 1}]}})
self.cmp.compare.find({'a.b': {'c': 1}})
def test__find_by_regex_object(self):
"""Test searching with regular expression objects."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert_one(bob)
self.cmp.do.insert_one(sam)
self.cmp.compare_ignore_order.find()
regex = re.compile('bob|sam')
self.cmp.compare_ignore_order.find({'name': regex})
regex = re.compile('bob|notsam')
self.cmp.compare_ignore_order.find({'name': regex})
self.cmp.compare_ignore_order.find({'name': {'$regex': regex}})
upper_regex = Regex('Bob')
self.cmp.compare_ignore_order.find({'name': {'$regex': upper_regex}})
self.cmp.compare_ignore_order.find({'name': {
'$regex': upper_regex,
'$options': 'i',
}})
self.cmp.compare_ignore_order.find({'name': {
'$regex': upper_regex,
'$options': 'I',
}})
self.cmp.compare_ignore_order.find({'name': {
'$regex': upper_regex,
'$options': 'z',
}})
def test__find_by_regex_string(self):
"""Test searching with regular expression string."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert_one(bob)
self.cmp.do.insert_one(sam)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$regex': 'bob|sam'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'bob|notsam'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'Bob', '$options': 'i'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'Bob', '$options': 'I'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'Bob', '$options': 'z'}})
def test__find_in_array_by_regex_object(self):
"""Test searching inside array with regular expression object."""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert_one(bob)
self.cmp.do.insert_one(sam)
regex = re.compile('^a')
self.cmp.compare_ignore_order.find({'text': regex})
regex = re.compile('e$')
self.cmp.compare_ignore_order.find({'text': regex})
regex = re.compile('bde|cde')
self.cmp.compare_ignore_order.find({'text': regex})
def test__find_in_array_by_regex_string(self):
"""Test searching inside array with regular expression string"""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert_one(bob)
self.cmp.do.insert_one(sam)
self.cmp.compare_ignore_order.find({'text': {'$regex': '^a'}})
self.cmp.compare_ignore_order.find({'text': {'$regex': 'e$'}})
self.cmp.compare_ignore_order.find({'text': {'$regex': 'bcd|cde'}})
def test__find_by_regex_string_on_absent_field_dont_break(self):
"""Test searching on absent field with regular expression string dont break"""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert_one(bob)
self.cmp.do.insert_one(sam)
self.cmp.compare_ignore_order.find({'text': {'$regex': 'bob|sam'}})
def test__find_by_elemMatch(self):
self.cmp.do.insert_one({'field': [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}]})
self.cmp.do.insert_one({'field': [{'a': 1, 'b': 4}, {'c': 3, 'd': 8}]})
self.cmp.do.insert_one({'field': 'nonlist'})
self.cmp.do.insert_one({'field': 2})
self.cmp.compare.find({'field': {'$elemMatch': {'b': 1}}})
self.cmp.compare_ignore_order.find({'field': {'$elemMatch': {'a': 1}}})
self.cmp.compare.find({'field': {'$elemMatch': {'b': {'$gt': 3}}}})
def test__find_by_elemMatchDirectQuery(self):
self.cmp.do.insert_many([
{'_id': 0, 'arr': [0, 1, 2, 3, 10]},
{'_id': 1, 'arr': [0, 2, 4, 6]},
{'_id': 2, 'arr': [1, 3, 5, 7]}
])
self.cmp.compare_ignore_order.find({'arr': {'$elemMatch': {'$lt': 10, '$gt': 4}}})
def test__find_in_array(self):
self.cmp.do.insert_one({'field': [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}]})
self.cmp.compare.find({'field.0.a': 1})
self.cmp.compare.find({'field.0.b': 2})
self.cmp.compare.find({'field.1.c': 3})
self.cmp.compare.find({'field.1.d': 4})
self.cmp.compare.find({'field.0': {'$exists': True}})
self.cmp.compare.find({'field.0': {'$exists': False}})
self.cmp.compare.find({'field.0.a': {'$exists': True}})
self.cmp.compare.find({'field.0.a': {'$exists': False}})
self.cmp.compare.find({'field.1.a': {'$exists': True}})
self.cmp.compare.find({'field.1.a': {'$exists': False}})
self.cmp.compare.find(
{'field.0.a': {'$exists': True}, 'field.1.a': {'$exists': False}})
def test__find_in_array_equal_null(self):
self.cmp.do.insert_many([
{'_id': 1, 'shape': [{'color': 'red'}]},
{'_id': 2, 'shape': [{'color': 'yellow'}]},
{'_id': 3, 'shape': [{'color': 'red'}, {'color': 'yellow'}]},
{'_id': 4, 'shape': [{'size': 3}]},
{'_id': 5},
{'_id': 6, 'shape': {'color': ['red', 'yellow']}},
{'_id': 7, 'shape': [{'color': 'red'}, {'color': None}]},
])
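        # A None / {'$eq': None} query on a dotted path also matches documents where
        # the path is missing entirely or the nested value is explicitly null.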
self.cmp.compare_ignore_order.find({'shape.color': {'$eq': None}})
self.cmp.compare_ignore_order.find({'shape.color': None})
def test__find_notequal(self):
"""Test searching with operators other than equality."""
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
a_goat = {'_id': 3, 'goatness': 'very'}
self.cmp.do.insert_many([bob, sam, a_goat])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$ne': 'bob'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'very'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'not very'}})
self.cmp.compare_ignore_order.find({'snakeness': {'$ne': 'very'}})
def test__find_notequal_by_value(self):
"""Test searching for None."""
bob = {'_id': 1, 'name': 'bob', 'sheepness': {'sometimes': True}}
sam = {'_id': 2, 'name': 'sam', 'sheepness': {'sometimes': True}}
a_goat = {'_id': 3, 'goatness': 'very', 'sheepness': {}}
self.cmp.do.insert_many([bob, sam, a_goat])
self.cmp.compare_ignore_order.find({'goatness': None})
self.cmp.compare_ignore_order.find({'sheepness.sometimes': None})
def test__find_not(self):
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
self.cmp.do.insert_many([bob, sam])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'bob'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'sam'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'dan'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'bob'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'sam'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'dan'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': re.compile('dan')}})
self.cmp.compare_ignore_order.find({'name': {'$not': Regex('dan')}})
def test__find_not_exceptions(self):
# pylint: disable=expression-not-assigned
self.cmp.do.insert_one(dict(noise='longhorn'))
with self.assertRaises(OperationFailure):
self.mongo_collection.find({'name': {'$not': True}})[0]
with self.assertRaises(OperationFailure):
self.fake_collection.find({'name': {'$not': True}})[0]
with self.assertRaises(OperationFailure):
self.mongo_collection.find({'name': {'$not': []}})[0]
with self.assertRaises(OperationFailure):
self.fake_collection.find({'name': {'$not': []}})[0]
with self.assertRaises(OperationFailure):
self.mongo_collection.find({'name': {'$not': ''}})[0]
with self.assertRaises(OperationFailure):
self.fake_collection.find({'name': {'$not': ''}})[0]
def test__find_compare(self):
self.cmp.do.insert_one(dict(noise='longhorn', sqrd='non numeric'))
for x in range(10):
self.cmp.do.insert_one(dict(num=x, sqrd=x * x))
self.cmp.compare_ignore_order.find({'sqrd': {'$lte': 4}})
self.cmp.compare_ignore_order.find({'sqrd': {'$lt': 4}})
self.cmp.compare_ignore_order.find({'sqrd': {'$gte': 64}})
self.cmp.compare_ignore_order.find({'sqrd': {'$gte': 25, '$lte': 36}})
def test__find_compare_objects(self):
self.cmp.do.insert_many([
{'_id': 1, 'counts': {'circles': 3}},
{'_id': 2, 'counts': {'squares': 0}},
{'_id': 3, 'counts': {'arrows': 15}},
{'_id': 4, 'counts': {'circles': 1}},
{'_id': 5, 'counts': OrderedDict([
('circles', 1),
('arrows', 15),
])},
{'_id': 6, 'counts': OrderedDict([
('arrows', 15),
('circles', 1),
])},
{'_id': 7},
{'_id': 8, 'counts': {}},
{'_id': 9, 'counts': {'circles': 'three'}},
{'_id': 10, 'counts': {'circles': None}},
{'_id': 11, 'counts': {'circles': b'bytes'}},
])
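        # Comparing whole embedded documents follows BSON ordering rules, so the
        # field order of the OrderedDict values matters for $gt.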
self.cmp.compare_ignore_order.find({'counts': {'$gt': {'circles': 1}}})
def test__find_compare_nested_objects(self):
self.cmp.do.insert_many([
{'_id': 1, 'counts': {'circles': {'blue': 3}}},
{'_id': 2, 'counts': {'squares': 0}},
{'_id': 3, 'counts': {'arrows': {'blue': 2}}},
{'_id': 4, 'counts': {'circles': {}}},
{'_id': 5, 'counts': {'arrows': True}},
])
self.cmp.compare_ignore_order.find(
{'counts': {'$gt': {'circles': {'blue': 1}}}})
def test__find_sets(self):
single = 4
even = [2, 4, 6, 8]
prime = [2, 3, 5, 7]
self.cmp.do.insert_many([
dict(x=single),
dict(x=even),
dict(x=prime),
dict()])
self.cmp.compare_ignore_order.find({'x': {'$in': [7, 8]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [4, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [4, None]}})
self.cmp.compare_ignore_order.find({'x': {'$nin': [2, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$all': [2, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$all': [7, 8]}})
self.cmp.compare_ignore_order.find({'x': 2})
self.cmp.compare_ignore_order.find({'x': 4})
self.cmp.compare_ignore_order.find({'$or': [{'x': 4}, {'x': 2}]})
self.cmp.compare_ignore_order.find({'$or': [{'x': 4}, {'x': 7}]})
self.cmp.compare_ignore_order.find({'$and': [{'x': 2}, {'x': 7}]})
self.cmp.compare_ignore_order.find({'$nor': [{'x': 3}]})
self.cmp.compare_ignore_order.find({'$nor': [{'x': 4}, {'x': 2}]})
def test__find_operators_in_list(self):
self.cmp.do.insert_many([
dict(x=4),
dict(x=[300, 500, 4]),
dict(x=[1200, 300, 1400])])
self.cmp.compare_ignore_order.find({'x': {'$gte': 1100, '$lte': 1250}})
self.cmp.compare_ignore_order.find({'x': {'$gt': 300, '$lt': 400}})
def test__find_sets_regex(self):
self.cmp.do.insert_many([
{'x': '123'},
{'x': ['abc', 'abd']},
])
digits_pat = re.compile(r'^\d+')
str_pat = re.compile(r'^ab[cd]')
non_existing_pat = re.compile(r'^lll')
self.cmp.compare_ignore_order.find({'x': {'$in': [digits_pat]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [str_pat]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [non_existing_pat]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [non_existing_pat, '123']}})
self.cmp.compare_ignore_order.find({'x': {'$nin': [str_pat]}})
self.cmp.compare_ignore_order.find({'x': {'$nin': [non_existing_pat]}})
def test__find_negative_matches(self):
self.cmp.do.insert_many([
{'_id': 1, 'shape': [{'color': 'red'}]},
{'_id': 2, 'shape': [{'color': 'yellow'}]},
{'_id': 3, 'shape': [{'color': 'red'}, {'color': 'yellow'}]},
{'_id': 4, 'shape': [{'size': 3}]},
{'_id': 5},
{'_id': 6, 'shape': {'color': ['red', 'yellow']}},
{'_id': 7, 'shape': {'color': 'red'}},
{'_id': 8, 'shape': {'color': ['blue', 'yellow']}},
{'_id': 9, 'shape': {'color': ['red']}},
])
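        # For array (or dotted array) fields, $ne and $nin exclude any document in
        # which at least one element matches the given value.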
self.cmp.compare_ignore_order.find({'shape.color': {'$ne': 'red'}})
self.cmp.compare_ignore_order.find({'shape.color': {'$ne': ['red']}})
self.cmp.compare_ignore_order.find({'shape.color': {'$nin': ['blue', 'red']}})
def test__find_ne_multiple_keys(self):
self.cmp.do.insert_many([
{'_id': 1, 'cases': [{'total': 1}]},
{'_id': 2, 'cases': [{'total': 2}]},
{'_id': 3, 'cases': [{'total': 3}]},
{'_id': 4, 'cases': []},
{'_id': 5},
])
self.cmp.compare_ignore_order.find({'cases.total': {'$gt': 1, '$ne': 3}})
self.cmp.compare_ignore_order.find({'cases.total': {'$gt': 1, '$nin': [1, 3]}})
def test__find_and_modify_remove(self):
self.cmp.do.insert_many([{'a': x, 'junk': True} for x in range(10)])
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.find_and_modify(
{'a': 2}, remove=True, fields={'_id': False, 'a': True})
return
self.cmp.compare.find_and_modify({'a': 2}, remove=True, fields={'_id': False, 'a': True})
self.cmp.compare_ignore_order.find()
def test__find_one_and_delete(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_delete({'a': 5}, {'_id': False})
self.cmp.compare.find()
def test__find_one_and_replace(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_replace(
{'a': 5}, {'a': 11}, projection={'_id': False})
self.cmp.compare.find()
def test__find_one_and_update(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_update(
{'a': 5}, {'$set': {'a': 11}}, projection={'_id': False})
self.cmp.compare.find()
def test__find_sort_list(self):
self.cmp.do.delete_many({})
for data in ({'a': 1, 'b': 3, 'c': 'data1'},
{'a': 2, 'b': 2, 'c': 'data3'},
{'a': 3, 'b': 1, 'c': 'data2'}):
self.cmp.do.insert_one(data)
self.cmp.compare.find(sort=[('a', 1), ('b', -1)])
self.cmp.compare.find(sort=[('b', 1), ('a', -1)])
self.cmp.compare.find(sort=[('b', 1), ('a', -1), ('c', 1)])
def test__find_sort_list_empty_order(self):
self.cmp.do.delete_many({})
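        # Exercises sorting across heterogeneous values: missing fields, numbers
        # and binary strings of different lengths.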
for data in ({'a': 1},
{'a': 2, 'b': -2},
{'a': 3, 'b': 4},
{'a': 4, 'b': b'bin1'},
{'a': 4, 'b': b'bin2'},
{'a': 4, 'b': b'alongbin1'},
{'a': 4, 'b': b'alongbin2'},
{'a': 4, 'b': b'zlongbin1'},
{'a': 4, 'b': b'zlongbin2'}):
self.cmp.do.insert_one(data)
self.cmp.compare.find(sort=[('b', 1)])
self.cmp.compare.find(sort=[('b', -1)])
def test__find_sort_list_nested_doc(self):
self.cmp.do.delete_many({})
for data in ({'root': {'a': 1, 'b': 3, 'c': 'data1'}},
{'root': {'a': 2, 'b': 2, 'c': 'data3'}},
{'root': {'a': 3, 'b': 1, 'c': 'data2'}}):
self.cmp.do.insert_one(data)
self.cmp.compare.find(sort=[('root.a', 1), ('root.b', -1)])
self.cmp.compare.find(sort=[('root.b', 1), ('root.a', -1)])
self.cmp.compare.find(
sort=[
('root.b', 1), ('root.a', -1), ('root.c', 1)])
def test__find_sort_list_nested_list(self):
self.cmp.do.delete_many({})
for data in ({'root': [{'a': 1, 'b': 3, 'c': 'data1'}]},
{'root': [{'a': 2, 'b': 2, 'c': 'data3'}]},
{'root': [{'a': 3, 'b': 1, 'c': 'data2'}]}):
self.cmp.do.insert_one(data)
self.cmp.compare.find(sort=[('root.0.a', 1), ('root.0.b', -1)])
self.cmp.compare.find(sort=[('root.0.b', 1), ('root.0.a', -1)])
self.cmp.compare.find(
sort=[
('root.0.b', 1), ('root.0.a', -1), ('root.0.c', 1)])
def test__find_limit(self):
self.cmp.do.delete_many({})
for data in ({'a': 1, 'b': 3, 'c': 'data1'},
{'a': 2, 'b': 2, 'c': 'data3'},
{'a': 3, 'b': 1, 'c': 'data2'}):
self.cmp.do.insert_one(data)
self.cmp.compare.find(limit=2, sort=[('a', 1), ('b', -1)])
# pymongo limit defaults to 0, returning everything
self.cmp.compare.find(limit=0, sort=[('a', 1), ('b', -1)])
def test__find_projection_subdocument_lists(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'a': 1, 'b': [{'c': 3, 'd': 4}, {'c': 5, 'd': 6}]})
self.cmp.compare.find_one({'a': 1}, {'_id': 0, 'a': 1, 'b': 1})
self.cmp.compare_exceptions.find_one(
{'a': 1}, OrderedDict([('_id', 0), ('a', 1), ('b', 1), ('b.c', 1)]))
self.cmp.compare_exceptions.find_one(
{'a': 1}, OrderedDict([('_id', 0), ('a', 1), ('b.c', 1), ('b', 1)]))
self.cmp.compare.find_one({'a': 1}, {'_id': 0, 'a': 1, 'b.c': 1})
self.cmp.compare.find_one({'a': 1}, {'_id': 0, 'a': 0, 'b.c': 0})
self.cmp.compare.find_one({'a': 1}, {'_id': 0, 'a': 1, 'b.c.e': 1})
self.cmp.compare_exceptions.find_one(
{'a': 1}, OrderedDict([('_id', 0), ('a', 0), ('b.c', 0), ('b.c.e', 0)]))
        # This one is not implemented in mongomock yet.
# self.cmp.compare.find_one(
# {'a': 1}, OrderedDict([('_id', 0), ('a', 0), ('b.c.e', 0), ('b.c', 0)]))
def test__find_type(self):
supported_types = (
'double',
'string',
'object',
'array',
'binData',
'objectId',
'bool',
'date',
'int',
'long',
'decimal',
)
self.cmp.do.insert_many([
{'a': 1.2}, # double
{'a': 'a string value'}, # string
{'a': {'b': 1}}, # object
{'a': [1, 2, 3]}, # array or int
{'a': b'hello'}, # binData
{'a': ObjectId()}, # objectId
{'a': True}, # bool
{'a': datetime.datetime.now()}, # date
{'a': 1}, # int
{'a': 1 << 32}, # long
{'a': decimal128.Decimal128('1.1')}, # decimal
])
for type_name in supported_types:
self.cmp.compare.find({'a': {'$type': type_name}})
    @skipIf(sys.version_info < (3, 7), 'Older versions of Python cannot copy regex patterns')
@skipIf(
helpers.PYMONGO_VERSION >= version.parse('4.0'),
        'pymongo v4 or above does not specify uuid encoding')
def test__sort_mixed_types(self):
self.cmp.do.insert_many([
{'type': 'bool', 'a': True},
{'type': 'datetime', 'a': datetime.datetime.now()},
{'type': 'dict', 'a': {'a': 1}},
{'type': 'emptyList', 'a': []},
{'type': 'int', 'a': 1},
{'type': 'listOfList', 'a': [[1, 2], [3, 4]]},
{'type': 'missing'},
{'type': 'None', 'a': None},
{'type': 'ObjectId', 'a': ObjectId()},
{'type': 'regex', 'a': re.compile('a')},
{'type': 'repeatedInt', 'a': [1, 2]},
{'type': 'string', 'a': 'a'},
{'type': 'tupleOfTuple', 'a': ((1, 2), (3, 4))},
{'type': 'uuid', 'a': uuid.UUID(int=3)},
{'type': 'DBRef', 'a': DBRef('a', 'a', 'db_name')}
])
self.cmp.compare.find({}, sort=[('a', 1), ('type', 1)])
@skipIf(
helpers.PYMONGO_VERSION >= version.parse('4.0'),
        'pymongo v4 or above does not specify uuid encoding')
def test__find_sort_uuid(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': uuid.UUID(int=3), 'timestamp': 99, 'a': 1},
{'_id': uuid.UUID(int=1), 'timestamp': 100, 'a': 3},
{'_id': uuid.UUID(int=2), 'timestamp': 100, 'a': 2},
])
self.cmp.compare.find({}, sort=[('timestamp', 1), ('_id', 1)])
@skipIf(
helpers.PYMONGO_VERSION < version.parse('4.0'),
        'older versions of pymongo accept encoding uuid')
def test__fail_at_uuid_encoding(self):
self.cmp.compare_exceptions.insert_one({'_id': uuid.UUID(int=2)})
def test__find_all(self):
self.cmp.do.insert_many([
{
'code': 'ijk',
'tags': ['electronics', 'school'],
'qty': [{'size': 'M', 'num': 100, 'color': 'green'}],
},
{
'code': 'efg',
'tags': ['school', 'book'],
'qty': [
{'size': 'S', 'num': 10, 'color': 'blue'},
{'size': 'M', 'num': 100, 'color': 'blue'},
{'size': 'L', 'num': 100, 'color': 'green'},
],
},
])
self.cmp.compare.find({'qty.size': {'$all': ['M', 'L']}})
# def test__as_class(self):
# class MyDict(dict):
# pass
#
# self.cmp.do.delete_many({})
# self.cmp.do.insert_one(
# {'a': 1, 'b': {'ba': 3, 'bb': 4, 'bc': [{'bca': 5}]}})
# self.cmp.compare.find({}, as_class=MyDict)
# self.cmp.compare.find({'a': 1}, as_class=MyDict)
def test__return_only_selected_fields(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['type'])
def test__return_only_selected_fields_no_id(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 1, '_id': 0})
def test__return_only_selected_fields_nested_field_found(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['properties.type'])
def test__return_only_selected_fields_nested_field_not_found(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['properties.color'])
def test__return_only_selected_fields_nested_field_found_no_id(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 1, '_id': 0})
def test__return_only_selected_fields_nested_field_not_found_no_id(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.color': 1, '_id': 0})
def test__exclude_selected_fields(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 0})
def test__exclude_selected_fields_including_id(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 0, '_id': 0})
def test__exclude_all_fields_including_id(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll'})
self.cmp.compare.find(
{'name': 'Chucky'}, projection={'type': 0, '_id': 0, 'name': 0})
def test__exclude_selected_nested_fields(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 0})
def test__exclude_all_selected_nested_fields(self):
self.cmp.do.insert_one(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 0, 'properties.model': 0})
def test__default_fields_if_projection_empty(self):
self.cmp.do.insert_one({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find({'name': 'Chucky'}, projection=[])
def test__projection_slice_int_first(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={'name': 1, 'values': {'$slice': 1}})
def test__projection_slice_int_last(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={'name': 1, 'values': {'$slice': -1}})
def test__projection_slice_list_pos(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [3, 1]}})
def test__projection_slice_list_neg(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [-3, 1]}})
def test__projection_slice_list_pos_to_end(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [3, 10]}})
def test__projection_slice_list_neg_to_end(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [-3, 10]}})
def test__projection_slice_list_select_subfield(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [
{'num': 0, 'val': 1}, {'num': 1, 'val': 2}]})
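        # Mixing a $slice on 'values' with a projection of one of its subfields is
        # expected to raise on both backends, so only exceptions are compared.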
self.cmp.compare_exceptions.find({'name': 'Array'}, projection={
'values.num': 1, 'values': {'$slice': 1}})
def test__projection_slice_list_wrong_num_slice(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare_exceptions.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [-3, 10, 1]}})
def test__projection_slice_list_wrong_slice_type(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare_exceptions.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': [1.0]}})
def test__projection_slice_list_wrong_slice_value_type(self):
self.cmp.do.insert_one({'name': 'Array', 'values': [0, 1, 2, 3, 4, 5, 6, 7]})
self.cmp.compare_exceptions.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': '3'}})
def test__projection_slice_list_wrong_value_type(self):
self.cmp.do.insert_one({'name': 'Array', 'values': 0})
self.cmp.compare_exceptions.find({'name': 'Array'}, projection={
'name': 1, 'values': {'$slice': 1}})
def test__remove(self):
"""Test the remove method."""
self.cmp.do.insert_one({'value': 1})
self.cmp.compare_ignore_order.find()
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.remove()
return
self.cmp.do.remove()
self.cmp.compare.find()
self.cmp.do.insert_many([
{'name': 'bob'},
{'name': 'sam'},
])
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'bob'})
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'notsam'})
self.cmp.compare.find()
self.cmp.do.remove({'name': 'sam'})
self.cmp.compare.find()
def test__delete_one(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find()
self.cmp.do.delete_one({'a': 5})
self.cmp.compare.find()
def test__delete_many(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find()
self.cmp.do.delete_many({'a': {'$gt': 5}})
self.cmp.compare.find()
def test__update(self):
doc = {'a': 1}
self.cmp.do.insert_one(doc)
new_document = {'new_attr': 2}
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.update({'a': 1}, new_document)
return
self.cmp.do.update({'a': 1}, new_document)
self.cmp.compare_ignore_order.find()
@skipIf(helpers.PYMONGO_VERSION >= version.parse('4.0'), 'pymongo v4 or above dropped update')
def test__update_upsert_with_id(self):
self.cmp.do.update(
{'a': 1}, {'_id': ObjectId('52d669dcad547f059424f783'), 'a': 1}, upsert=True)
self.cmp.compare.find()
def test__update_with_zero_id(self):
self.cmp.do.insert_one({'_id': 0})
self.cmp.do.replace_one({'_id': 0}, {'a': 1})
self.cmp.compare.find()
def test__update_upsert_with_dots(self):
self.cmp.do.update_one(
{'a.b': 1}, {'$set': {'c': 2}}, upsert=True)
self.cmp.compare.find()
def test__update_upsert_with_operators(self):
self.cmp.do.update_one(
{'$or': [{'name': 'billy'}, {'name': 'Billy'}]},
{'$set': {'name': 'Billy', 'age': 5}}, upsert=True)
self.cmp.compare.find()
self.cmp.do.update_one({'a.b': {'$eq': 1}, 'd': {}}, {'$set': {'c': 2}}, upsert=True)
self.cmp.compare.find()
def test__update_upsert_with_matched_subdocuments(self):
self.cmp.do.update_one(
{'b.c.': 1, 'b.d': 3},
{'$set': {'a': 1}}, upsert=True)
self.cmp.compare.find()
@skipIf(helpers.PYMONGO_VERSION >= version.parse('4.0'), 'pymongo v4 or above dropped update')
def test__update_with_empty_document_comes(self):
"""Tests calling update_one with just '{}' for replacing whole document"""
self.cmp.do.insert_one({'name': 'bob', 'hat': 'wide'})
self.cmp.do.update({'name': 'bob'}, {})
self.cmp.compare.find()
def test__update_one(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.update_one({'a': 2}, {'$set': {'b': 1}})
self.cmp.compare.find()
self.cmp.do.update_one({'a': 3}, {'$set': {'a': 3, 'b': 0}})
self.cmp.compare.find()
self.cmp.do.update_one({'a': 3}, {'$set': {'a': 3, 'b': 0}},
upsert=True)
self.cmp.compare.find()
self.cmp.compare_exceptions.update_one({}, {'$set': {}})
self.cmp.compare_exceptions.update_one({'a': 'does-not-exist'}, {'$set': {}})
self.cmp.compare_exceptions.update_one({'a': 'does-not-exist'}, {'$set': {}}, upsert=True)
def test__update_many(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.update_many({'b': 1}, {'$set': {'b': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'b': 0}, {'$set': {'b': 1}})
self.cmp.compare.find()
def test__replace_one(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 2}, {'a': 3, 'b': 0})
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 4}, {'a': 4, 'b': 0})
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 4}, {'a': 4, 'b': 0}, upsert=True)
self.cmp.compare.find()
def test__set(self):
"""Tests calling update with $set members."""
self.cmp.do.update_one(
{'_id': 42},
{'$set': {'some': 'thing'}},
upsert=True)
self.cmp.compare.find({'_id': 42})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_one({'name': 'bob'}, {'$set': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_one({'name': 'bob'}, {'$set': {'hat': 'red'}})
self.cmp.compare.find({'name': 'bob'})
def test__unset(self):
"""Tests calling update with $unset members."""
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'a': 'aaa'}}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$unset': {'a': 0}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'a': 'aaa'}}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$unset': {'a': 1}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'a': 'aaa'}}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$unset': {'a': ''}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'a': 'aaa'}}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$unset': {'a': True}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'a': 'aaa'}}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$unset': {'a': False}})
self.cmp.compare.find({'name': 'bob'})
def test__unset_nested(self):
self.cmp.do.update_many({'_id': 1}, {'$set': {'a': {'b': 1, 'c': 2}}}, upsert=True)
self.cmp.do.update_many({'_id': 1}, {'$unset': {'a.b': True}})
self.cmp.compare.find()
self.cmp.do.update_many({'_id': 1}, {'$set': {'a': {'b': 1, 'c': 2}}}, upsert=True)
self.cmp.do.update_many({'_id': 1}, {'$unset': {'a.b': False}})
self.cmp.compare.find()
self.cmp.do.update_many({'_id': 1}, {'$set': {'a': {'b': 1}}}, upsert=True)
self.cmp.do.update_many({'_id': 1}, {'$unset': {'a.b': True}})
self.cmp.compare.find()
self.cmp.do.update_many({'_id': 1}, {'$set': {'a': {'b': 1}}}, upsert=True)
self.cmp.do.update_many({'_id': 1}, {'$unset': {'a.b': False}})
self.cmp.compare.find()
def test__unset_positional(self):
self.cmp.do.insert_one({'a': 1, 'b': [{'c': 2, 'd': 3}]})
self.cmp.do.update_many(
{'a': 1, 'b': {'$elemMatch': {'c': 2, 'd': 3}}},
{'$unset': {'b.$.c': ''}}
)
self.cmp.compare.find()
def test__set_upsert(self):
self.cmp.do.delete_many({})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'alice'}, {'$set': {'age': 1}}, True)
self.cmp.compare_ignore_order.find()
def test__set_subdocument_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [0, 0]})
self.cmp.do.insert_one({'name': 'bob', 'some_field': 'B', 'data': [0, 0]})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'some_field': 'A', 'data.1': 3}})
self.cmp.compare.find()
def test__set_subdocument_array_bad_index_after_dot(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'some_field': 'B', 'data': [0, 0]})
self.cmp.do.update_many({'name': 'bob'}, {'$set': {'some_field': 'A', 'data.3': 1}})
self.cmp.compare.find()
def test__set_subdocument_array_bad_neg_index_after_dot(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'some_field': 'B', 'data': [0, 0]})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$set': {'data.-3': 1}})
def test__set_subdocuments_positional(self):
self.cmp.do.insert_one({'name': 'bob', 'subdocs': [
{'id': 1, 'name': 'foo'},
{'id': 2, 'name': 'bar'}
]})
self.cmp.do.update_many(
{'name': 'bob', 'subdocs.id': 2},
{'$set': {'subdocs.$': {'id': 3, 'name': 'baz'}}})
self.cmp.compare.find()
def test__inc(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'count': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__max(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
for i in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$max': {'count': i}})
self.cmp.compare.find({'name': 'bob'})
def test__min(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
for i in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$min': {'count': i}})
self.cmp.compare.find({'name': 'bob'})
def test__inc_upsert(self):
self.cmp.do.delete_many({})
for _ in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'count': 1}}, True)
self.cmp.compare.find({'name': 'bob'})
def test__inc_subdocument(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'data.age2': 1}})
self.cmp.compare.find()
def test__inc_subdocument_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [0, 0]})
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'data.1': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'data.1': 1}})
self.cmp.compare.find()
def test__inc_subdocument_array_bad_index_after_dot(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [0, 0]})
self.cmp.do.update_many({'name': 'bob'}, {'$inc': {'data.3': 1}})
self.cmp.compare.find()
def test__inc_subdocument_array_bad_neg_index_after_dot(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [0, 0]})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$inc': {'data.-3': 1}})
def test__inc_subdocument_positional(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update_many(
{'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$inc': {'data.$.age': 1}})
self.cmp.compare.find()
def test__setOnInsert(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
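        # Without upsert, $setOnInsert is effectively a no-op: its fields are only
        # applied when an upsert inserts a new document.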
self.cmp.do.update_many({'name': 'bob'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'ann'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
def test__setOnInsert_upsert(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'ann'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update_many({'name': 'bob'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'name': 'ann'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
def test__setOnInsert_subdocument_upsert(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update_many(
{'name': 'bob'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update_many(
{'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update_many(
{'name': 'ann'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument_elemMatch(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update_many(
{'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}})
self.cmp.compare.find()
def test__inc_subdocument_positional_upsert(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update_many(
{'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}}, True)
self.cmp.compare.find()
def test__addToSet(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
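        # $addToSet only appends values that are not already present, so repeating
        # the same update is idempotent.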
for _ in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$addToSet': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many({'name': 'bob'}, {'$addToSet': {'hat': 'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_nested(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many(
{'name': 'bob'}, {'$addToSet': {'hat.color': 'green'}})
self.cmp.compare.find({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many(
{'name': 'bob'}, {'$addToSet': {'hat.color': 'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_each(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many(
{'name': 'bob'},
{'$addToSet': {'hat': {'$each': ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
for _ in range(3):
self.cmp.do.update_many(
{'name': 'bob'},
{'$addToSet': {'shirt.color': {'$each': ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
def test__pop(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall']})
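        # $pop with 1 removes the last element of the array, -1 removes the first.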
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat': 1}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat': -1}})
self.cmp.compare.find({'name': 'bob'})
def test__pop_invalid_type(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': 'green'})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat': 1}})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat': -1}})
def test__pop_invalid_syntax(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green']})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat': 2}})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat': '5'}})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat.-1': 1}})
def test__pop_array_in_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': [['green']]})
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat.0': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__pop_too_far_in_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': [['green']]})
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat.50': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__pop_document_in_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': [{'hat': ['green']}]})
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat.0.hat': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__pop_invalid_document_in_array(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': [{'hat': 'green'}]})
self.cmp.compare_exceptions.update_many({'name': 'bob'}, {'$pop': {'hat.0.hat': 1}})
def test__pop_empty(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': []})
self.cmp.do.update_many({'name': 'bob'}, {'$pop': {'hat': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__pull(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update_many({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_query(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': [{'size': 5}, {'size': 10}]})
self.cmp.do.update_many(
{'name': 'bob'}, {'$pull': {'hat': {'size': {'$gt': 6}}}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat': {'sizes': [{'size': 5}, {'size': 8}, {'size': 10}]}}
)
self.cmp.do.update_many(
{'name': 'bob'}, {'$pull': {'hat.sizes': {'size': {'$gt': 6}}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_in_query_operator(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'sizes': [0, 1, 2, 3, 4, 5]})
self.cmp.do.update_one({'name': 'bob'}, {'$pull': {'sizes': {'$in': [1, 3]}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_in_nested_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'nested': {'sizes': [0, 1, 2, 3, 4, 5]}})
self.cmp.do.update_one({'name': 'bob'}, {'$pull': {'nested.sizes': {'$in': [1, 3]}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_dict(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update_many(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$pull': {'hat.$.sizes': {'size': 'L'}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_list(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat':
[{'name': 'derby', 'sizes': ['L', 'XL']},
{'name': 'cap', 'sizes': ['S', 'L']}]})
self.cmp.do.update_many(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$pull': {'hat.$.sizes': 'XL'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat': {'nested': ['element1', 'element2', 'element1']}})
self.cmp.do.update_many({'name': 'bob'}, {'$pull': {'hat.nested': 'element1'}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many(
{'name': 'bob'}, {'$pullAll': {'hat': ['green', 'blue']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall', 'blue']})
self.cmp.do.update_many({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll_nested_dict(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat': {'properties': {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update_many(
{'name': 'bob'}, {'$pullAll': {'hat.properties.sizes': ['M']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat': {'properties': {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update_many(
{'name': 'bob'},
{'$pullAll': {'hat.properties.sizes': ['M', 'L']}})
self.cmp.compare.find({'name': 'bob'})
def test__push(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update_many({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_dict(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'name': 'bob', 'hat': [{'name': 'derby', 'sizes': ['L', 'XL']}]})
self.cmp.do.update_many(
{'name': 'bob'},
{'$push': {'hat': {'name': 'cap', 'sizes': ['S', 'L']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update_many(
{'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update_many(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push': {'hat.$.sizes': {'size': 'M', 'quantity': 6}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict_each(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update_many(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push':
{'hat.$.sizes':
{'$each':
[{'size': 'M', 'quantity': 6}, {'size': 'S', 'quantity': 1}]}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict_in_list(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update_many(
{'name': 'bob'},
{'$push': {'hat.1.sizes': {'size': 'M', 'quantity': 6}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_list_each(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': ['L', 'XL'],
'colors': ['green', 'blue']},
{'name': 'cap', 'sizes': ['S', 'L'],
'colors': ['blue']}
]
})
self.cmp.do.update_many(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push': {'hat.$.sizes': {'$each': ['M', 'S']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': {'data': {'sizes': ['XL']}}})
self.cmp.do.update_many({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute_each(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob', 'hat': {}})
self.cmp.do.update_many(
{'name': 'bob'}, {'$push': {'hat.first': {'$each': ['a', 'b']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_nested_attribute(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each_to_absent_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'name': 'bob'})
self.cmp.do.update_many(
{'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each_slice(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'scores': [40, 50, 60]})
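        # With $push/$each, a negative $slice keeps the last N elements, a positive
        # one keeps the first N, and 0 empties the array.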
self.cmp.do.update_one({}, {'$push': {'scores': {
'$each': [80, 78, 86],
'$slice': -5,
}}})
self.cmp.compare.find()
self.cmp.do.update_one({}, {'$push': {'scores': {
'$each': [100, 20],
'$slice': 3,
}}})
self.cmp.compare.find()
self.cmp.do.update_one({}, {'$push': {'scores': {
'$each': [],
'$slice': 2,
}}})
self.cmp.compare.find()
self.cmp.do.update_one({}, {'$push': {'scores': {
'$each': [25, 15],
'$slice': 0,
}}})
self.cmp.compare.find()
def test__update_push_slice_nested_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'games': [{'scores': [40, 50, 60]}, {'a': 1}]})
self.cmp.do.update_one({}, {'$push': {'games.0.scores': {
'$each': [80, 78, 86],
'$slice': -5,
}}})
self.cmp.compare.find()
self.cmp.do.update_one(
{'games': {'$elemMatch': {'scores': {'$exists': True}}}},
{'$push': {'games.$.scores': {'$each': [0, 1], '$slice': -5}}},
)
self.cmp.compare.find()
def test__update_push_array_of_arrays(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one({'scores': [[40, 50], [60, 20]]})
self.cmp.do.update_one(
{'scores': {'$elemMatch': {'0': 60}}},
{'$push': {'scores.$': 30}},
)
self.cmp.compare.find()
def test__update_push_sort(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'a': {'b': [{'value': 3}, {'value': 1}, {'value': 2}]}})
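        # $sort used with $push/$each re-sorts the whole array after the new
        # elements are appended.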
self.cmp.do.update_one({}, {'$push': {'a.b': {
'$each': [{'value': 4}],
'$sort': {'value': 1},
}}})
self.cmp.compare.find()
def _compare_update_push_position(self, position):
self.cmp.do.delete_many({})
self.cmp.do.insert_one(
{'a': {'b': [{'value': 3}, {'value': 1}, {'value': 2}]}})
self.cmp.do.update_one({}, {'$push': {'a.b': {
'$each': [{'value': 4}],
'$position': position,
}}})
self.cmp.compare.find()
def test__update_push_position(self):
self._compare_update_push_position(0)
self._compare_update_push_position(1)
self._compare_update_push_position(5)
# TODO(pascal): Enable once we test against Mongo v3.6+
# self._compare_update_push_position(-2)
def test__drop(self):
self.cmp.do.insert_one({'name': 'another new'})
self.cmp.do.drop()
self.cmp.compare.find({})
def test__ensure_index(self):
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.ensure_index('name')
return
self.cmp.compare.ensure_index('name')
self.cmp.compare.ensure_index('hat', cache_for=100)
self.cmp.compare.ensure_index([('name', 1), ('hat', -1)])
self.cmp.do.insert_one({})
self.cmp.compare.index_information()
def test__drop_index(self):
self.cmp.do.insert_one({})
self.cmp.compare.create_index([('name', 1), ('hat', -1)])
self.cmp.compare.drop_index([('name', 1), ('hat', -1)])
self.cmp.compare.index_information()
def test__drop_index_by_name(self):
self.cmp.do.insert_one({})
results = self.cmp.compare.create_index('name')
self.cmp.compare.drop_index(results['real'])
self.cmp.compare.index_information()
def test__index_information(self):
self.cmp.do.insert_one({})
self.cmp.compare.index_information()
def test__list_indexes(self):
self.cmp.do.insert_one({})
self.cmp.compare_ignore_order.sort_by(lambda i: i['name']).list_indexes()
def test__empty_logical_operators(self):
for operator in ('$or', '$and', '$nor'):
self.cmp.compare_exceptions.find({operator: []})
def test__rename(self):
input_ = {'_id': 1, 'foo': 'bar'}
self.cmp.do.insert_one(input_)
query = {'_id': 1}
update = {'$rename': {'foo': 'bar'}}
self.cmp.do.update_one(query, update=update)
self.cmp.compare.find()
def test__rename_collection(self):
self.cmp.do.insert_one({'_id': 1, 'foo': 'bar'})
self.cmp.compare.rename('new_name')
self.cmp.compare.find()
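    # $setEquals compares two or more arrays as sets: element order and
    # duplicates are ignored.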
def test__set_equals(self):
self.cmp.do.insert_many([
{'array': ['one', 'three']},
])
self.cmp.compare.aggregate([{'$project': {
'_id': 0,
'same_array': {'$setEquals': ['$array', '$array']},
'eq_array': {'$setEquals': [['one', 'three'], '$array']},
'ne_array': {'$setEquals': [['one', 'two'], '$array']},
'eq_in_another_order': {'$setEquals': [['one', 'two'], ['two', 'one']]},
'ne_in_another_order': {'$setEquals': [['one', 'two'], ['three', 'one', 'two']]},
'three_equal': {'$setEquals': [['one', 'two'], ['two', 'one'], ['one', 'two']]},
'three_not_equal': {'$setEquals': [['one', 'three'], ['two', 'one'], ['two', 'one']]},
}}])
@skipIf(
helpers.PYMONGO_VERSION < version.parse('4.0'), 'pymongo v4 dropped map reduce methods')
def test__map_reduce_fails(self):
self.cmp.compare_exceptions.map_reduce(Code(''), Code(''), 'myresults')
self.cmp.compare_exceptions.inline_map_reduce(Code(''), Code(''))
self.cmp.compare_exceptions.group(['a'], {'a': {'$lt': 3}}, {'count': 0}, Code('''
function(cur, result) { result.count += cur.count }
'''))
@skipIf(helpers.PYMONGO_VERSION >= version.parse('4.0'), 'pymongo v4 dropped group method')
@skipIf(helpers.PYMONGO_VERSION < version.parse('3.6'), 'pymongo v3.6 broke group method')
def test__group_fails(self):
self.cmp.compare_exceptions.group(['a'], {'a': {'$lt': 3}}, {'count': 0}, Code('''
function(cur, result) { result.count += cur.count }
'''))
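# Unlike the comparison tests above, the map/reduce tests below exercise mongomock
# directly and check its output against hand-written expected results.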
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
@skipIf(not _HAVE_MAP_REDUCE, 'execjs not installed')
@skipIf(helpers.PYMONGO_VERSION >= version.parse('4.0'), 'pymongo v4 dropped map reduce')
class CollectionMapReduceTest(TestCase):
def setUp(self):
self.db = mongomock.MongoClient().map_reduce_test
self.data = [{'x': 1, 'tags': ['dog', 'cat']},
{'x': 2, 'tags': ['cat']},
{'x': 3, 'tags': ['mouse', 'cat', 'dog']},
{'x': 4, 'tags': []}]
for item in self.data:
self.db.things.insert_one(item)
self.map_func = Code('''
function() {
this.tags.forEach(function(z) {
emit(z, 1);
});
}''')
self.reduce_func = Code('''
function(key, values) {
var total = 0;
for(var i = 0; i<values.length; i++) {
total += values[i];
}
return total;
}''')
self.expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 3}]
def test__map_reduce(self):
self._check_map_reduce(self.db.things, self.expected_results)
def test__map_reduce_clean_res_colc(self):
# Checks that the result collection is cleaned between calls
self._check_map_reduce(self.db.things, self.expected_results)
more_data = [{'x': 1, 'tags': []},
{'x': 2, 'tags': []},
{'x': 3, 'tags': []},
{'x': 4, 'tags': []}]
for item in more_data:
self.db.more_things.insert_one(item)
expected_results = []
self._check_map_reduce(self.db.more_things, expected_results)
def _check_map_reduce(self, colc, expected_results):
result = colc.map_reduce(self.map_func, self.reduce_func, 'myresults')
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count_documents({}), len(expected_results))
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_son(self):
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
out=SON([('replace', 'results'), ('db', 'map_reduce_son_test')]))
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(result.name, 'results')
self.assertEqual(result.database.name, 'map_reduce_son_test')
self.assertEqual(result.count_documents({}), 3)
for doc in result.find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_full_response(self):
expected_full_response = {
'counts': {
'input': 4,
'reduce': 2,
'emit': 6,
'output': 3
},
'timeMillis': 5,
'ok': 1.0,
'result': 'myresults'
}
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
'myresults', full_response=True)
self.assertIsInstance(result, dict)
self.assertEqual(result['counts'], expected_full_response['counts'])
self.assertEqual(result['result'], expected_full_response['result'])
for doc in getattr(self.db, result['result']).find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_query(self):
expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 2}]
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
'myresults', query={'tags': 'dog'})
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count_documents({}), 3)
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_with_limit(self):
result = self.db.things.map_reduce(
self.map_func, self.reduce_func, 'myresults', limit=2)
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count_documents({}), 2)
def test__inline_map_reduce(self):
result = self.db.things.inline_map_reduce(
self.map_func, self.reduce_func)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 3)
for doc in result:
self.assertIn(doc, self.expected_results)
def test__inline_map_reduce_full_response(self):
expected_full_response = {
'counts': {
'input': 4,
'reduce': 2,
'emit': 6,
'output': 3
},
'timeMillis': 5,
'ok': 1.0,
'result': [
{'_id': 'cat', 'value': 3},
{'_id': 'dog', 'value': 2},
{'_id': 'mouse', 'value': 1}]
}
result = self.db.things.inline_map_reduce(
self.map_func, self.reduce_func, full_response=True)
self.assertIsInstance(result, dict)
self.assertEqual(result['counts'], expected_full_response['counts'])
for doc in result['result']:
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_object_id(self):
obj1 = ObjectId()
obj2 = ObjectId()
data = [{'x': 1, 'tags': [obj1, obj2]},
{'x': 2, 'tags': [obj1]}]
for item in data:
self.db.things_with_obj.insert_one(item)
expected_results = [{'_id': obj1, 'value': 2},
{'_id': obj2, 'value': 1}]
result = self.db.things_with_obj.map_reduce(
self.map_func, self.reduce_func, 'myresults')
self.assertIsInstance(result, mongomock.Collection)
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count_documents({}), 2)
for doc in result.find():
self.assertIn(doc, expected_results)
def test_mongomock_map_reduce(self):
# Arrange
fake_etap = mongomock.MongoClient().db
fake_statuses_collection = fake_etap.create_collection('statuses')
fake_config_id = 'this_is_config_id'
test_name = 'this_is_test_name'
fake_statuses_objects = [
{
'testID': test_name,
'kind': 'Test',
'duration': 8392,
'configID': fake_config_id
},
{
'testID': test_name,
'kind': 'Test',
'duration': 8393,
'configID': fake_config_id
},
{
'testID': test_name,
'kind': 'Test',
'duration': 8394,
'configID': fake_config_id
}
]
fake_statuses_collection.insert_many(fake_statuses_objects)
map_function = Code('function(){emit(this._id, this.duration);}')
reduce_function = Code('function() {}')
search_query = {'configID': fake_config_id, 'kind': 'Test', 'testID': test_name}
# Act
result = fake_etap.statuses.map_reduce(
map_function, reduce_function, 'my_collection', query=search_query)
# Assert
self.assertEqual(result.count_documents({}), 3)
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
@skipIf(not _HAVE_MAP_REDUCE, 'execjs not installed')
@skipIf(helpers.PYMONGO_VERSION >= version.parse('3.6'), 'pymongo v3.6 broke group')
class GroupTest(_CollectionComparisonTest):
def setUp(self):
_CollectionComparisonTest.setUp(self)
self._id1 = ObjectId()
self.data = [
{'a': 1, 'count': 4},
{'a': 1, 'count': 2},
{'a': 1, 'count': 4},
{'a': 2, 'count': 3},
{'a': 2, 'count': 1},
{'a': 1, 'count': 5},
{'a': 4, 'count': 4},
{'b': 4, 'foo': 4},
{'b': 2, 'foo': 3, 'name': 'theone'},
{'b': 1, 'foo': 2},
{'b': 1, 'foo': self._id1},
]
self.cmp.do.insert_many(self.data)
def test__group1(self):
key = ['a']
initial = {'count': 0}
condition = {'a': {'$lt': 3}}
reduce_func = Code('''
function(cur, result) { result.count += cur.count }
''')
self.cmp.compare.group(key, condition, initial, reduce_func)
def test__group2(self):
reduce_func = Code('''
function(cur, result) { result.count += 1 }
''')
self.cmp.compare.group(
key=['b'],
condition={'foo': {'$in': [3, 4]}, 'name': 'theone'},
initial={'count': 0},
reduce=reduce_func)
def test__group3(self):
reducer = Code('''
function(obj, result) {result.count+=1 }
''')
conditions = {'foo': {'$in': [self._id1]}}
self.cmp.compare.group(
key=['foo'],
condition=conditions,
initial={'count': 0},
reduce=reducer)
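# The aggregation tests below go through the comparison harness inherited from
# _CollectionComparisonTest: `cmp.do` runs the call on both the real and the fake
# collection (returning results keyed as 'real'/'fake', see test__aggregate29),
# while the `cmp.compare*` helpers additionally assert that both results agree.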
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
class MongoClientAggregateTest(_CollectionComparisonTest):
def setUp(self):
super(MongoClientAggregateTest, self).setUp()
self.data = [
{'_id': ObjectId(), 'a': 1, 'b': 1, 'count': 4, 'swallows': ['European swallow'],
'date': datetime.datetime(2015, 10, 1, 10, 0)},
{'_id': ObjectId(), 'a': 1, 'b': 1, 'count': 2, 'swallows': ['African swallow'],
'date': datetime.datetime(2015, 12, 1, 12, 0)},
{'_id': ObjectId(), 'a': 1, 'b': 2, 'count': 4, 'swallows': ['European swallow'],
'date': datetime.datetime(2014, 10, 2, 12, 0)},
{'_id': ObjectId(), 'a': 2, 'b': 2, 'count': 3, 'swallows': ['African swallow',
'European swallow'],
'date': datetime.datetime(2015, 1, 2, 10, 0)},
{'_id': ObjectId(), 'a': 2, 'b': 3, 'count': 1, 'swallows': [],
'date': datetime.datetime(2013, 1, 3, 12, 0)},
{'_id': ObjectId(), 'a': 1, 'b': 4, 'count': 5, 'swallows': ['African swallow',
'European swallow'],
'date': datetime.datetime(2015, 8, 4, 12, 0)},
{'_id': ObjectId(), 'a': 4, 'b': 4, 'count': 4, 'swallows': ['unladen swallow'],
'date': datetime.datetime(2014, 7, 4, 13, 0)}]
for item in self.data:
self.cmp.do.insert_one(item)
def test__aggregate1(self):
pipeline = [
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate2(self):
pipeline = [
{'$group': {'_id': '$a', 'count': {'$sum': '$count'}}},
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1, 'count': 1}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate3(self):
pipeline = [
{'$group': {'_id': 'a', 'count': {'$sum': '$count'}}},
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1, 'count': 1}},
{'$skip': 1},
{'$limit': 2}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate4(self):
pipeline = [
{'$unwind': '$swallows'},
{'$sort': {'count': -1, 'swallows': -1}}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate5(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a'}, 'total': {'$sum': '$count'},
'avg': {'$avg': '$count'}}},
{'$sort': {'_id.a': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate6(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a', 'id_b': '$b'}, 'total': {'$sum': '$count'},
'avg': {'$avg': '$count'}}},
{'$sort': {'_id.id_a': 1, '_id.id_b': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate7(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a', 'id_b': {'$year': '$date'}},
'total': {'$sum': '$count'}, 'avg': {'$avg': '$count'}}},
{'$sort': {'_id.id_a': 1, '_id.id_b': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate8(self):
pipeline = [
{'$group': {'_id': None, 'counts': {'$sum': '$count'}}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate9(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a'}, 'total': {'$sum': '$count'},
'avg': {'$avg': '$count'}}},
{'$group': {'_id': None, 'counts': {'$sum': '$total'}}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate10(self): # group on compound index
self.cmp.do.delete_many({})
data = [
{'_id': ObjectId(),
'key_1': {'sub_key_1': 'value_1'}, 'nb': 1},
{'_id': ObjectId(),
'key_1': {'sub_key_1': 'value_2'}, 'nb': 1},
{'_id': ObjectId(),
'key_1': {'sub_key_1': 'value_1'}, 'nb': 2}
]
for item in data:
self.cmp.do.insert_one(item)
pipeline = [
{'$group': {'_id': '$key_1.sub_key_1', 'nb': {'$sum': '$nb'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate11(self):
pipeline = [
{'$group': {'_id': None, 'max_count': {'$max': '$count'},
'min_count': {'$min': '$count'}}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate12(self):
pipeline = [
{'$group': {'_id': '$a', 'max_count': {'$max': '$count'},
'min_count': {'$min': '$count'}}},
{'$sort': {'_id': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate13(self):
pipeline = [
{'$sort': {'date': 1}},
{'$group': {'_id': None, 'last_date': {'$last': '$date'},
'first_date': {'$first': '$date'}}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_on_no_data(self):
pipeline = [
{'$sort': {'date': 1}},
{'$group': {
'_id': None,
                'last_unknown': {'$last': '$unknown_field'},
'first_unknown': {'$first': '$unknown_field'},
}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate14(self):
pipeline = [
{'$sort': {'date': 1}},
{'$group': {'_id': '$a', 'last_date': {'$last': '$date'},
'first_date': {'$first': '$date'}}},
{'$sort': {'_id': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_group_by_dbref(self):
self.cmp.do.insert_many([
{'myref': DBRef('a', '1')},
{'myref': DBRef('a', '1')},
{'myref': DBRef('a', '2')},
{'myref': DBRef('b', '1')},
])
self.cmp.compare.aggregate([
{'$group': {'_id': '$myref'}}
])
def test__aggregate_project_include_in_inclusion(self):
pipeline = [
{'$project': {'a': 1, 'b': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_project_exclude_in_exclusion(self):
pipeline = [
{'$project': {'a': 0, 'b': 0}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_project_exclude_id_in_inclusion(self):
pipeline = [
{'$project': {'a': 1, '_id': 0}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_project_with_subfields(self):
self.cmp.do.insert_many([
{'a': {'b': 3}, 'other': 1},
{'a': {'c': 3}},
{'b': {'c': 3}},
{'a': 5},
])
pipeline = [
{'$project': {'a.b': 1}}
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate_project_with_subfields_exclude(self):
self.cmp.do.insert_many([
{'a': {'b': 3}, 'other': 1},
{'a': {'b': 3, 'd': 5}},
{'a': {'c': 3, 'd': 5}},
{'b': {'c': 3}},
{'a': 5},
])
pipeline = [
{'$project': {'a.b': 0}}
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test_aggregate_project_with_missing_subfields(self):
self.cmp.do.insert_many([
{'a': {'b': 3}, 'other': 1},
{'a': {'b': {'c': 4}, 'd': 5}},
{'a': {'c': 3, 'd': 5}},
{'b': {'c': 3}},
{'a': 5},
])
pipeline = [
{'$project': {'_id': False, 'e': '$a.b.c'}}
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate_unwind_project_id(self):
self.cmp.do.insert_one({
'_id': 'id0',
'c2': [
{'_id': 'id1', 'o': 'x'},
{'_id': 'id2', 'o': 'y'},
{'_id': 'id3', 'o': 'z'},
],
})
pipeline = [
{'$unwind': '$c2'},
{'$project': {'_id': '$c2._id', 'o': '$c2.o'}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate17(self):
pipeline = [
{'$project': {'_id': 0, 'created': {'$subtract': [{'$min': ['$a', '$b']}, '$count']}}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate18(self):
pipeline = [
{'$project': {'_id': 0, 'created': {'$subtract': ['$a', '$b']}}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate19(self):
pipeline = [
{'$project': {'_id': 0, 'created': {'$subtract': ['$a', 1]}}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate20(self):
pipeline = [{'$project': {
'_id': 0,
'abs': {'$abs': '$b'},
'add': {'$add': ['$a', 1, '$b']},
'ceil': {'$ceil': 8.35},
'div': {'$divide': ['$a', 1]},
'exp': {'$exp': 2},
'floor': {'$floor': 4.65},
'ln': {'$ln': 100},
'log': {'$log': [8, 2]},
'log10': {'$log10': 1000},
'mod': {'$mod': [46, 9]},
'multiply': {'$multiply': [5, '$a', '$b']},
'pow': {'$pow': [4, 2]},
'sqrt': {'$sqrt': 100},
'trunc': {'$trunc': 8.35},
}}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate21(self):
pipeline = [
{'$group': {'_id': '$a', 'count': {'$sum': 1}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate22(self):
pipeline = [
{'$group': {'_id': {'$gte': ['$a', 2]}, 'total': {'$sum': '$count'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate23(self):
# make sure we aggregate compound keys correctly
pipeline = [
{'$group': {'_id': {'id_a': '$a', 'id_b': '$b'}, 'total': {'$sum': '$count'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate24(self):
# make sure we aggregate zero rows correctly
pipeline = [
{'$match': {'_id': '123456'}},
{'$group': {'_id': {'$eq': ['$a', 1]}, 'total': {'$sum': '$count'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate25(self):
pipeline = [
{'$group': {'_id': {'$eq': [{'$year': '$date'}, 2015]}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate26(self):
pipeline = [
{'$group': {'_id': {'$eq': [{'$year': '$date'}, 2015]}, 'total': {'$sum': '$count'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate27(self):
# test $lookup stage
pipeline = [
{'$lookup': {
'from': self.collection_name,
'localField': 'a',
'foreignField': 'b',
'as': 'lookup'
}}
]
self.cmp.compare.aggregate(pipeline)
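    # $graphLookup performs a recursive search in 'from': it starts from the
    # 'startWith' value(s) and repeatedly follows connectFromField ->
    # connectToField, collecting every matched document into 'as'.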
def test__aggregate27b(self):
# test $graphLookup stage
self.cmp.do.delete_many({})
data = [
{'_id': ObjectId(),
'name': 'a', 'child': 'b', 'val': 2},
{'_id': ObjectId(),
'name': 'b', 'child': 'c', 'val': 3},
{'_id': ObjectId(),
'name': 'c', 'child': None, 'val': 4},
{'_id': ObjectId(),
'name': 'd', 'child': 'a', 'val': 5}
]
for item in data:
self.cmp.do.insert_one(item)
pipeline = [
{'$match': {'name': 'a'}},
{'$graphLookup': {
'from': self.collection_name,
'startWith': '$child',
'connectFromField': 'child',
'connectToField': 'name',
'as': 'lookup'
}},
{'$unwind': '$lookup'},
{'$sort': {'lookup.name': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate28(self):
pipeline = [{'$group': {
'_id': '$b',
'total2015': {'$sum': {'$cond': [{'$ne': [{'$year': '$date'}, 2015]}, 0, 1]}},
}}]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate29(self):
# group addToSet
pipeline = [
{'$group': {'_id': '$a', 'nb': {'$addToSet': '$count'}}},
{'$sort': {'_id': 1}}
]
# self.cmp.compare cannot be used as addToSet returns elements in an unpredictable order
aggregations = self.cmp.do.aggregate(pipeline)
expected = list(aggregations['real'])
result = list(aggregations['fake'])
self.assertEqual(len(result), len(expected))
for expected_elt, result_elt in zip(expected, result):
self.assertCountEqual(expected_elt.keys(), result_elt.keys())
for key in result_elt:
if isinstance(result_elt[key], list):
self.assertCountEqual(result_elt[key], expected_elt[key], msg=key)
else:
self.assertEqual(result_elt[key], expected_elt[key], msg=key)
def test__aggregate30(self):
# group addToSet dict element
self.cmp.do.delete_many({})
data = [
{'a': {'c': '1', 'd': 1}, 'b': {'c': '2', 'd': 2}},
{'a': {'c': '1', 'd': 3}, 'b': {'c': '4', 'd': 4}},
{'a': {'c': '5', 'd': 1}, 'b': {'c': '6', 'd': 6}},
{'a': {'c': '5', 'd': 2}, 'b': {'c': '6', 'd': 6}}
]
self.cmp.do.insert_many(data)
pipeline = [
            {'$group': {'_id': '$a.c', 'nb': {'$addToSet': '$b'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate31(self):
# group addToSet creating dict
pipeline = [
{'$group': {'_id': '$count', 'set': {'$addToSet': {'a': '$a', 'b': '$b'}}}},
]
# self.cmp.compare cannot be used as addToSet returns elements in an unpredictable order
aggregations = self.cmp.do.aggregate(pipeline)
expected = list(aggregations['real'])
result = list(aggregations['fake'])
self.assertEqual(len(result), len(expected))
set_expected = set([
tuple(sorted(e.items())) for elt in expected for e in elt['set']
])
set_result = set([
tuple(sorted(e.items())) for elt in result for e in elt['set']
])
self.assertEqual(set_result, set_expected)
def test__aggregate_add_to_set_missing_value(self):
self.cmp.do.delete_many({})
data = [
{'a': {'c': '1', 'd': 1}, 'b': 1},
{'a': {'c': '1', 'd': 2}}
]
self.cmp.do.insert_many(data)
pipeline = [
            {'$group': {'_id': '$a.c', 'nb': {'$addToSet': '$b'}}},
]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate32(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{'group': 'one'},
{'group': 'one'},
{'group': 'one', 'data': None},
{'group': 'one', 'data': 0},
{'group': 'one', 'data': 2},
{'group': 'one', 'data': {'a': 1}},
{'group': 'one', 'data': [1, 2]},
{'group': 'one', 'data': [3, 4]},
])
pipeline = [{'$group': {
'_id': '$group',
'count': {'$sum': 1},
'countData': {'$sum': {'$cond': ['$data', 1, 0]}},
'countDataExists': {'$sum': {'$cond': {
'if': {'$gt': ['$data', None]},
'then': 1,
'else': 0,
}}},
}}]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate33(self):
self.cmp.do.drop()
self.cmp.do.insert_one({'_id': 1, 'a': 2, 'b': 3, 'c': '$d'})
pipeline = [{'$project': {
'_id': 0,
'max': {'$max': [5, 9, '$a', None]},
'min': {'$min': [8, 2, None, 3, '$a', '$b']},
'avg': {'$avg': [4, 2, None, 3, '$a', '$b', 4]},
'sum': {'$sum': [4, 2, None, 3, '$a', '$b', {'$sum': [0, 1, '$b']}]},
'maxString': {'$max': [{'$literal': '$b'}, '$c']},
'maxNone': {'$max': [None, None]},
'minNone': {'$min': [None, None]},
'avgNone': {'$avg': ['a', None]},
'sumNone': {'$sum': ['a', None]},
}}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate34(self):
self.cmp.do.drop()
self.cmp.do.insert_one({'_id': 1, 'a': 'Hello', 'b': 'World'})
pipeline = [{'$project': {
'_id': 0,
'concat': {'$concat': ['$a', ' Dear ', '$b']},
'concat_none': {'$concat': ['$a', None, '$b']},
'sub1': {'$substr': ['$a', 0, 4]},
'lower': {'$toLower': '$a'},
'lower_err': {'$toLower': None},
'split_string_none': {'$split': [None, 'l']},
'split_string_missing': {'$split': ['$missingField', 'l']},
'split_delimiter_none': {'$split': ['$a', None]},
'split_delimiter_missing': {'$split': ['$a', '$missingField']},
'split': {'$split': ['$a', 'l']},
'strcasecmp': {'$strcasecmp': ['$a', '$b']},
'upper': {'$toUpper': '$a'},
'upper_err': {'$toUpper': None},
}}]
self.cmp.compare.aggregate(pipeline)
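    # $regexMatch returns a boolean; 'regex' may be a plain string or a bson
    # Regex, 'options' accepts i, m, x and s, and options must not be given
    # both on a Regex object and in the 'options' field.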
def test__aggregate_regexpmatch(self):
self.cmp.do.insert_many([
{'_id': 1, 'description': 'Single LINE description.'},
{'_id': 2, 'description': 'First lines\nsecond line'},
{'_id': 3, 'description': 'Many spaces before line'},
{'_id': 4, 'description': 'Multiple\nline descriptions'},
{'_id': 5, 'description': 'anchors, links and hyperlinks'},
{'_id': 6, 'description': u'métier work vocation'}
])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 'line'}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 'lin(e|k)'}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 'line', 'options': 'i'}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': Regex('line', 'i')}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {
                'input': '$description', 'regex': 'lin(e|k) # matches line or link',
'options': 'x',
}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': 'm.*line', 'options': 'si',
}},
}}])
# Missing fields
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$missing', 'regex': 'line'}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': '$missing'}},
}}])
# Exceptions
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': ['$description', 'line']},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {'inut': '$description', 'regex': 'line'}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 'line', 'other': True}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': 42, 'regex': 'line'}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 'line', 'options': '?'}},
}}])
self.cmp.compare.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': Regex('line'), 'options': 'i'}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': re.compile('line', re.U), 'options': 'i'}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': re.compile('line', re.U)}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': Regex('line', 'i'), 'options': 'i'}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {
'input': '$description', 'regex': Regex('line', 'u')}},
}}])
self.cmp.compare_exceptions.aggregate([{'$addFields': {
'result': {'$regexMatch': {'input': '$description', 'regex': 5}},
}}])
def test__aggregate35(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'_id': 1,
'a': 2,
'b': 3,
'c': '$d',
'd': decimal128.Decimal128('4')
})
pipeline = [{'$project': {
'_id': 0,
'sum': {'$sum': [4, 2, None, 3, '$a', '$b', '$d', {'$sum': [0, 1, '$b']}]},
'sumNone': {'$sum': ['a', None]},
}}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_project_id_0(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': 4},
{'a': 5},
{},
])
pipeline = [{'$project': {'_id': 0}}]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate_project_array_subfield(self):
self.cmp.do.insert_many([
{'_id': 1, 'a': [{'b': 1, 'c': 2, 'd': 3}], 'e': 4},
{'_id': 2, 'a': [{'c': 12, 'd': 13}], 'e': 14},
{'_id': 3, 'a': [{'b': 21, 'd': 23}], 'e': 24},
{'_id': 4, 'a': [{'b': 31, 'c': 32}], 'e': 34},
{'_id': 5, 'a': [{'b': 41}], 'e': 44},
{'_id': 6, 'a': [{'c': 51}], 'e': 54},
{'_id': 7, 'a': [{'d': 51}], 'e': 54},
{'_id': 8, 'a': [
{'b': 61, 'c': 62, 'd': 63}, 65, 'foobar',
{'b': 66, 'c': 67, 'd': 68}], 'e': 64},
{'_id': 9, 'a': []},
{'_id': 10, 'a': [1, 2, 3, 4]},
{'_id': 11, 'a': 'foobar'},
{'_id': 12, 'a': 5},
])
pipeline = [{'$project': {'a.b': 1, 'a.c': 1}}]
self.cmp.compare_ignore_order.aggregate(pipeline)
def test__aggregate_project_array_size_missing(self):
self.cmp.do.insert_one({'_id': 1})
self.cmp.compare_exceptions.aggregate([
{'$match': {'_id': 1}},
{'$project': {'a': {'$size': '$arr'}}},
])
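    # $bucket assigns each document to the half-open interval defined by two
    # consecutive 'boundaries' values; documents whose groupBy value is missing
    # or out of range fall into the 'default' bucket.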
def test__aggregate_bucket(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{
'_id': 1,
'title': 'The Pillars of Society',
'artist': 'Grosz',
'year': 1926,
'price': 199.99,
},
{
'_id': 2,
'title': 'Melancholy III',
'artist': 'Munch',
'year': 1902,
'price': 200.00,
},
{
'_id': 3,
'title': 'Dancer',
'artist': 'Miro',
'year': 1925,
'price': 76.04,
},
{
'_id': 4,
'title': 'The Great Wave off Kanagawa',
'artist': 'Hokusai',
'price': 167.30,
},
{
'_id': 5,
'title': 'The Persistence of Memory',
'artist': 'Dali',
'year': 1931,
'price': 483.00,
},
{
'_id': 6,
'title': 'Composition VII',
'artist': 'Kandinsky',
'year': 1913,
'price': 385.00,
},
{
'_id': 7,
'title': 'The Scream',
'artist': 'Munch',
'year': 1893,
# No price
},
{
'_id': 8,
'title': 'Blue Flower',
'artist': "O'Keefe",
'year': 1918,
'price': 118.42,
},
])
self.cmp.compare.aggregate([{'$bucket': {
'groupBy': '$price',
'boundaries': [0, 200, 400],
'default': 'Other',
'output': {
'count': {'$sum': 1},
'titles': {'$push': '$title'},
},
}}])
self.cmp.compare.aggregate([{'$bucket': {
'groupBy': '$price',
'boundaries': [0, 200, 400],
'default': 'Other',
}}])
def test__aggregate_lookup_dot_in_local_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': 2, 'should': {'do': 'join'}},
{'_id': 3, 'should': {'do': 'not_join'}},
{'_id': 4, 'should': 'skip'},
{'_id': 5, 'should': 'join'},
{'_id': 6, 'should': 'join'},
{'_id': 7, 'should': 'skip'},
])
pipeline = [
{'$lookup': {
'from': self.collection_name,
'localField': 'should.do',
'foreignField': 'should',
'as': 'b'
}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_count(self):
self.cmp.do.insert_many([
{'_id': i} for i in range(5)
])
self.cmp.compare.aggregate([
{'$count': 'my_count'}
])
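    # $facet runs several named sub-pipelines over the same input documents and
    # returns a single document with one array of results per facet.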
def test__aggregate_facet(self):
self.cmp.do.insert_many([
{'_id': i} for i in range(5)
])
self.cmp.compare.aggregate([
{'$facet': {
'pipeline_a': [{'$count': 'my_count'}],
'pipeline_b': [{'$group': {'_id': None}}]}}
])
def test__aggregate_project_rotate(self):
self.cmp.do.insert_one({'_id': 1, 'a': 1, 'b': 2, 'c': 3})
self.cmp.compare.aggregate([
{'$project': {'a': '$b', 'b': '$a', 'c': 1}},
])
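    # $unwind options: includeArrayIndex records each element's index, and
    # preserveNullAndEmptyArrays keeps documents whose field is missing, null
    # or an empty array (they are dropped otherwise).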
def test__aggregate_unwind_options(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{'_id': 1, 'item': 'ABC', 'sizes': ['S', 'M', 'L']},
{'_id': 2, 'item': 'EFG', 'sizes': []},
{'_id': 3, 'item': 'IJK', 'sizes': 'M'},
{'_id': 4, 'item': 'LMN'},
{'_id': 5, 'item': 'XYZ', 'sizes': None},
])
self.cmp.compare.aggregate([{'$unwind': {'path': '$sizes'}}])
self.cmp.compare.aggregate([
{'$unwind': {'path': '$sizes', 'includeArrayIndex': 'arrayIndex'}}
])
self.cmp.compare.aggregate([
{'$unwind': {'path': '$sizes', 'preserveNullAndEmptyArrays': True}},
])
def test__aggregate_subtract_dates(self):
self.cmp.compare.aggregate([{'$project': {
'_id': 0,
'since': {'$subtract': ['$date', datetime.datetime(2014, 7, 4, 13, 0)]},
}}])
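    # $$ROOT is the system variable referring to the whole document currently
    # flowing through the pipeline stage.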
def test__aggregate_system_variables(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{'_id': 1},
{'_id': 2, 'parent_id': 1},
{'_id': 3, 'parent_id': 1},
])
self.cmp.compare.aggregate([
{'$match': {'parent_id': {'$in': [1]}}},
{'$group': {'_id': 1, 'docs': {'$push': '$$ROOT'}}},
])
def test__aggregate_date_operators(self):
self.cmp.compare_ignore_order.aggregate([
{'$project': {
'doy': {'$dayOfYear': '$date'},
'dom': {'$dayOfMonth': '$date'},
'dow': {'$dayOfWeek': '$date'},
'M': {'$month': '$date'},
'w': {'$week': '$date'},
'h': {'$hour': '$date'},
'm': {'$minute': '$date'},
's': {'$second': '$date'},
'ms': {'$millisecond': '$date'},
}},
])
def test__aggregate_in(self):
self.cmp.compare_ignore_order.aggregate([
{'$project': {
'count': '$count',
'in': {'$in': ['$count', [1, 4, 5]]},
}},
])
def test__aggregate_switch(self):
self.cmp.compare_ignore_order.aggregate([
{'$project': {
'compare_with_3': {
'$switch': {
'branches': [
{'case': {'$eq': ['$count', 3]}, 'then': 'equals 3'},
{'case': {'$gt': ['$count', 3]}, 'then': 'greater than 3'},
{'case': {'$lt': ['$count', 3]}, 'then': 'less than 3'}
],
}
},
'equals_3': {
'$switch': {
'branches': [
{'case': {'$eq': ['$count', 3]}, 'then': 'equals 3'},
],
'default': 'not equal',
}
},
'missing_field': {
'$switch': {
'branches': [
{'case': '$missing_field', 'then': 'first case'},
{'case': True, 'then': '$missing_field'},
],
'default': 'did not match',
}
},
}},
])
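    # MongoDB coerces $switch/$cond conditions to booleans: false, null, 0 and
    # missing values are falsy, while everything else (including '', [] and {})
    # is truthy, unlike Python.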
def test__aggregate_switch_mongodb_to_bool(self):
def build_switch(case):
return {
'$switch': {
'branches': [
{'case': case, 'then': 't'},
],
'default': 'f',
}
}
self.cmp.compare_ignore_order.aggregate([
{'$project': {
'undefined_value': build_switch('$not_existing_field'),
'false_value': build_switch(False),
'null_value': build_switch(None),
'zero_value': build_switch(0),
'true_value': build_switch(True),
'one_value': build_switch(1),
'empty_string': build_switch(''),
'empty_list': build_switch([]),
'empty_dict': build_switch({}),
}},
])
def test__aggregate_bug_473(self):
"""Regression test for bug https://github.com/mongomock/mongomock/issues/473."""
self.cmp.do.drop()
self.cmp.do.insert_one({
'name': 'first',
'base_value': 100,
'values_list': [
{'updated_value': 5},
{'updated_value': 15},
],
})
self.cmp.compare.aggregate([
{'$project': {
'name': 1,
'_id': 0,
'sum': {'$sum': [
'$base_value',
{'$arrayElemAt': ['$values_list.updated_value', -1]},
]},
}},
])
    def test__aggregate_array_elem_at(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{'values_list': [1, 2]},
{'values_list': [1, 2, 3]},
])
self.cmp.compare.aggregate([{
'$project': {
'first_user_id': {'$arrayElemAt': ['$values_list', 2]},
'other_user_id': {'$arrayElemAt': ['$values_list', -1]},
},
}])
def test_aggregate_bug_607(self):
"""Regression test for bug https://github.com/mongomock/mongomock/issues/607."""
self.cmp.do.drop()
self.cmp.do.insert_one({
'index': 2,
'values': [0, 1, 5]
})
self.cmp.compare.aggregate([
{'$project': {
'values_index': {'$arrayElemAt': ['$values', '$index']}
}}
])
self.cmp.compare.aggregate([
{'$project': {
'values_index': {'$arrayElemAt': ['$values', {'$add': [1, 1]}]}
}}
])
def test__aggregate_first_last_in_array(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'values': [0, 1, 5]
})
self.cmp.compare.aggregate([
{'$project': {
'first': {'$first': '$values'},
'last': {'$last': '$values'},
}}
])
def test__aggregate_cond_mongodb_to_bool(self):
"""Regression test for bug https://github.com/mongomock/mongomock/issues/650"""
self.cmp.compare_ignore_order.aggregate([
{'$project': {
# undefined aka KeyError
'undefined_value': {'$cond': ['$not_existing_field', 't', 'f']},
'false_value': {'$cond': [False, 't', 'f']},
'null_value': {'$cond': [None, 't', 'f']},
'zero_value': {'$cond': [0, 't', 'f']},
'true_value': {'$cond': [True, 't', 'f']},
'one_value': {'$cond': [1, 't', 'f']},
'empty_string': {'$cond': ['', 't', 'f']},
'empty_list': {'$cond': [[], 't', 'f']},
'empty_dict': {'$cond': [{}, 't', 'f']},
}},
])
def test__aggregate_concatArrays(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'_id': 1,
'a': [1, 2],
'b': ['foo', 'bar', 'baz'],
'c': {
'arr1': [123]
}
})
pipeline = [{
'$project': {
'_id': 0,
'concat': {'$concatArrays': ['$a', ['#', '*'], '$c.arr1', '$b']},
'concat_array_expression': {'$concatArrays': '$b'},
'concat_tuples': {'$concatArrays': ((1, 2, 3), (1,))},
'concat_none': {'$concatArrays': None},
'concat_missing_field': {'$concatArrays': '$foo'},
'concat_none_item': {'$concatArrays': ['$a', None, '$b']},
'concat_missing_field_item': {'$concatArrays': [[1, 2, 3], '$c.arr2']}
}
}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_concatArrays_exceptions(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'_id': 1,
'a': {
'arr1': [123]
}
})
self.cmp.compare_exceptions.aggregate([{
'$project': {
'concat_parameter_not_array': {'$concatArrays': 42}
}
}])
self.cmp.compare_exceptions.aggregate([{
'$project': {
'concat_item_not_array': {'$concatArrays': [[1, 2], '$a']}
}
}])
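    # $filter keeps only the array elements for which 'cond' evaluates to true;
    # the current element is bound to $$this unless an 'as' name is provided.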
def test__aggregate_filter(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{
'_id': 0,
'items': [
{'item_id': 43, 'quantity': 2, 'price': 10},
{'item_id': 2, 'quantity': 1, 'price': 240},
],
},
{
'_id': 1,
'items': [
{'item_id': 23, 'quantity': 3, 'price': 110},
{'item_id': 103, 'quantity': 4, 'price': 5},
{'item_id': 38, 'quantity': 1, 'price': 300},
],
},
{
'_id': 2,
'items': [
{'item_id': 4, 'quantity': 1, 'price': 23},
],
},
])
self.cmp.compare.aggregate([{'$project': {'filtered_items': {'$filter': {
'input': '$items',
'as': 'item',
'cond': {'$gte': ['$$item.price', 100]},
}}}}])
self.cmp.compare.aggregate([{'$project': {'filtered_items': {'$filter': {
'input': '$items',
'cond': {'$lt': ['$$this.price', 100]},
}}}}])
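    # $map applies the 'in' expression to every element of 'input' (available
    # as $$this, or as $$<name> when 'as' is given); a null or missing input
    # yields null.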
def test__aggregate_map(self):
self.cmp.do.insert_one({
'array': [1, 2, 3, 4],
})
self.cmp.compare.aggregate([{'$project': {
'_id': 0,
'array': {'$map': {
'input': '$array',
'in': {'$multiply': ['$$this', '$$this']},
}},
'custom_variable': {'$map': {
'input': '$array',
'as': 'self',
'in': {'$multiply': ['$$self', '$$self']},
}},
'empty': {'$map': {
'input': [],
'in': {'$multiply': ['$$this', '$$this']},
}},
'null': {'$map': {
'input': None,
'in': '$$this',
}},
'missing': {'$map': {
'input': '$missing.key',
'in': '$$this',
}},
}}])
def test__aggregate_slice(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{
'_id': 0,
'items': list(range(10)),
},
{
'_id': 1,
'items': list(range(10, 20)),
},
{
'_id': 2,
'items': list(range(20, 30)),
},
])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 0]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 5]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 10]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 0, 1]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 0, 5]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 5, 1]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 5, 5]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', 0, 10000]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', -5]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', -10]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', -5, 5]
}}}])
self.cmp.compare.aggregate([{'$project': {'slice': {
'$slice': ['$items', -10, 5]
}}}])
def test__aggregate_no_entries(self):
pipeline = [
{'$match': {'a': {'$eq': 'Never going to happen'}}},
{'$out': 'new_collection'},
]
self.cmp.compare.aggregate(pipeline)
cmp = self._create_compare_for_collection('new_collection')
cmp.compare.find()
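    # $replaceRoot promotes the document produced by 'newRoot' (a field path or
    # a computed document) to the top level, discarding the rest of the input
    # document.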
def test__replace_root(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{
'_id': 1,
'fruit': ['apples', 'oranges'],
'in_stock': {'oranges': 20, 'apples': 60},
'on_order': {'oranges': 35, 'apples': 75},
},
{
'_id': 2,
'vegetables': ['beets', 'yams'],
'in_stock': {'beets': 130, 'yams': 200},
'on_order': {'beets': 90, 'yams': 145},
},
])
self.cmp.compare.aggregate([{'$replaceRoot': {'newRoot': '$in_stock'}}])
def test__replace_root_new_document(self):
self.cmp.do.drop()
self.cmp.do.insert_many([
{'_id': 1, 'first_name': 'Gary', 'last_name': 'Sheffield', 'city': 'New York'},
{'_id': 2, 'first_name': 'Nancy', 'last_name': 'Walker', 'city': 'Anaheim'},
{'_id': 3, 'first_name': 'Peter', 'last_name': 'Sumner', 'city': 'Toledo'},
])
self.cmp.compare.aggregate([{'$replaceRoot': {'newRoot': {
'full_name': {'$concat': ['$first_name', '$last_name']},
}}}])
def test__insert_date_with_timezone(self):
self.cmp.do.insert_one({
'dateNoTz': datetime.datetime(2000, 1, 1, 12, 30, 30, 12745),
'dateTz': datetime.datetime(
2000, 1, 1, 12, 30, 30, 12745,
tzinfo=UTCPlus2()),
})
self.cmp.compare.find_one()
def test__aggregate_add_fields(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'a': 1, 'b': 2},
{},
{'nested': {'foo': 1}},
{'nested': 'not nested'},
])
self.cmp.compare.aggregate([{'$addFields': {
'a': 3,
'c': {'$sum': [3, '$a', '$b']},
'd': '$d',
'nested.foo': 5,
}}])
def test__aggregate_add_fields_with_max_min(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': 4, 'dates': [
datetime.datetime(2020, 1, 10),
datetime.datetime(2020, 1, 5),
datetime.datetime(2020, 1, 7)
]},
{'_id': 5, 'dates': []}
])
pipeline = [
{'$addFields': {
'max_date': {'$max': '$dates'},
'min_date': {'$min': '$dates'}
}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate_add_fields_with_sum_avg(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': 4, 'values': [10, 5, 7]},
{'_id': 5, 'values': []}
])
pipeline = [
{'$addFields': {
                'sum_val': {'$sum': '$values'},
                'avg_val': {'$avg': '$values'}
}}
]
self.cmp.compare.aggregate(pipeline)
def test_aggregate_to_string(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'_id': ObjectId('5dd6a8f302c91829ef248162'),
'boolean_true': True,
'boolean_false': False,
'integer': 100,
'date': datetime.datetime(2018, 3, 27, 0, 58, 51, 538000),
})
pipeline = [
{
'$addFields': {
'_id': {'$toString': '$_id'},
'boolean_true': {'$toString': '$boolean_true'},
'boolean_false': {'$toString': '$boolean_false'},
'integer': {'$toString': '$integer'},
'date': {'$toString': '$date'},
'none': {'$toString': '$notexist'}
}
}
]
self.cmp.compare.aggregate(pipeline)
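    # $toDecimal maps booleans to 0/1, parses base-10 numeric strings, converts
    # dates to milliseconds since the Unix epoch and returns null for missing
    # fields.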
def test__aggregate_to_decimal(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'boolean_true': True,
'boolean_false': False,
'integer': 100,
'double': 1.999,
'decimal': decimal128.Decimal128('5.5000'),
'str_base_10_numeric': '123',
'str_negative_number': '-23',
'str_decimal_number': '1.99',
'str_not_numeric': '123a123',
'datetime': datetime.datetime.utcfromtimestamp(0),
})
pipeline = [
{
'$addFields': {
'boolean_true': {'$toDecimal': '$boolean_true'},
'boolean_false': {'$toDecimal': '$boolean_false'},
'integer': {'$toDecimal': '$integer'},
'double': {'$toDecimal': '$double'},
'decimal': {'$toDecimal': '$decimal'},
'str_base_10_numeric': {'$toDecimal': '$str_base_10_numeric'},
'str_negative_number': {'$toDecimal': '$str_negative_number'},
'str_decimal_number': {'$toDecimal': '$str_decimal_number'},
'datetime': {'$toDecimal': '$datetime'},
'not_exist_field': {'$toDecimal': '$not_exist_field'},
}
},
{'$project': {'_id': 0}},
]
self.cmp.compare.aggregate(pipeline)
def test_aggregate_to_int(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'boolean_true': True,
'boolean_false': False,
'integer': 100,
'double': 1.999,
'decimal': decimal128.Decimal128('5.5000')
})
pipeline = [
{
'$addFields': {
'boolean_true': {'$toInt': '$boolean_true'},
'boolean_false': {'$toInt': '$boolean_false'},
'integer': {'$toInt': '$integer'},
'double': {'$toInt': '$double'},
'decimal': {'$toInt': '$decimal'},
'not_exist': {'$toInt': '$not_exist'},
}
},
{
'$project': {
'_id': 0
}
}
]
self.cmp.compare.aggregate(pipeline)
def test_aggregate_to_long(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'boolean_true': True,
'boolean_false': False,
'integer': 100,
'double': 1.999,
'decimal': decimal128.Decimal128('5.5000')
})
pipeline = [
{
'$addFields': {
'boolean_true': {'$toLong': '$boolean_true'},
'boolean_false': {'$toLong': '$boolean_false'},
'integer': {'$toLong': '$integer'},
'double': {'$toLong': '$double'},
'decimal': {'$toLong': '$decimal'},
'not_exist': {'$toLong': '$not_exist'},
}
},
{
'$project': {
'_id': 0
}
}
]
self.cmp.compare.aggregate(pipeline)
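    # $dateToString formats a date with printf-like specifiers such as %Y, %m,
    # %d, %H and %M.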
def test_aggregate_date_to_string(self):
self.cmp.do.drop()
self.cmp.do.insert_one({
'start_date': datetime.datetime(2011, 11, 4, 0, 5, 23)
})
pipeline = [
{
'$addFields': {
'start_date': {
'$dateToString': {'format': '%Y/%m/%d %H:%M', 'date': '$start_date'}
}
}
},
{'$project': {'_id': 0}},
]
self.cmp.compare.aggregate(pipeline)
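    # $arrayToObject accepts either [key, value] pairs or {'k': ..., 'v': ...}
    # documents; repeated keys keep the value of the last occurrence, and any
    # other shape is an error (exercised at the end of the test).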
def test_aggregate_array_to_object(self):
self.cmp.do.drop()
self.cmp.do.insert_many([{
'items': [['a', 1], ['b', 2], ['c', 3], ['a', 4]]
}, {
'items': (['a', 1], ['b', 2], ['c', 3], ['a', 4])
}, {
'items': [('a', 1), ('b', 2), ('c', 3), ('a', 4)]
}, {
'items': (('a', 1), ('b', 2), ('c', 3), ('a', 4))
}, {
'items': [['a', 1], ('b', 2), ['c', 3], ('a', 4)]
}, {
'items': (['a', 1], ('b', 2), ['c', 3], ('a', 4))
}, {
'items': [{'k': 'a', 'v': 1}, {'k': 'b', 'v': 2},
{'k': 'c', 'v': 3}, {'k': 'a', 'v': 4}]
}, {
'items': []
}, {
'items': ()
}, {
'items': None
}])
pipeline = [
{
'$project': {
'items': {
'$arrayToObject': '$items'
},
'not_exists': {
'$arrayToObject': '$nothing'
}
}
},
{'$project': {'_id': 0}},
]
self.cmp.compare.aggregate(pipeline)
# All of these items should trigger an error
items = [[
{'$addFields': {'items': ''}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': 100}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [['a', 'b', 'c'], ['d', 2]]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [['a'], ['b', 2]]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [[]]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [{'k': 'a', 'v': 1, 't': 't'}, {'k': 'b', 'v': 2}]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [{'v': 1, 't': 't'}]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [{}]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
], [
{'$addFields': {'items': [['a', 1], {'k': 'b', 'v': 2}]}},
{'$project': {'items': {'$arrayToObject': '$items'}, '_id': 0}}
]]
for item in items:
self.cmp.compare_exceptions.aggregate(item)
def test__create_duplicate_index(self):
self.cmp.do.create_index([('value', 1)])
self.cmp.do.create_index([('value', 1)])
self.cmp.compare_exceptions.create_index([('value', 1)], unique=True)
def test_aggregate_project_with_boolean(self):
self.cmp.do.drop()
# Test with no items
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$not': {}}}}
])
        # The following tests run with a single inserted item
self.cmp.do.insert_one({
'items': []
})
# Test with 0 arguments
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$not': {}}}}
])
# Test with one argument
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$not': True}}}
])
# Test with two arguments
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [True, True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [False, True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [True, False]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [False, False]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [True, True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [False, True]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [True, False]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [False, False]}}}
])
        # The following tests run with more than two items
self.cmp.do.insert_many([
{'items': []},
{'items': []}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': []}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$not': {}}}}
])
        # Test with operands other than plain booleans
self.cmp.do.insert_one({
'items': ['foo']
})
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$and': [{'$eq': ['$items', ['foo']]}]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$or': [{'$eq': ['$items', ['foo']]}]}}}
])
self.cmp.compare.aggregate([
{'$project': {'_id': 0, 'items': {'$not': {'$eq': ['$items', ['foo']]}}}}
])
def test__aggregate_project_missing_fields(self):
self.cmp.do.insert_one({'_id': 1, 'arr': {'a': 2, 'b': 3}})
self.cmp.compare.aggregate([
{'$match': {'_id': 1}},
{'$project': OrderedDict([
('_id', False),
('rename_dot', '$arr.c'),
('a', '$arr.a')
])}
])
def test__aggregate_graph_lookup_missing_field(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': ObjectId(),
'name': 'a', 'child': 'b', 'val': 2},
{'_id': ObjectId(),
'name': 'b', 'child': 'c', 'val': 3},
{'_id': ObjectId(),
'name': 'c', 'child': None, 'val': 4},
{'_id': ObjectId(),
'name': 'd', 'child': 'a', 'val': 5}
])
pipeline = [
{'$match': {'name': 'a'}},
{'$graphLookup': {
'from': self.collection_name,
'startWith': '$fieldThatDoesNotExist',
'connectFromField': 'child',
'connectToField': 'name',
'as': 'lookup'
}},
{'$unwind': '$lookup'},
{'$sort': {'lookup.name': 1}}
]
self.cmp.compare.aggregate(pipeline)
pipeline = [
{'$match': {'name': 'a'}},
{'$graphLookup': {
'from': self.collection_name,
'startWith': {'$concat': ['a', '$fieldThatDoesNotExist']},
'connectFromField': 'child',
'connectToField': 'name',
'as': 'lookup'
}},
{'$unwind': '$lookup'},
{'$sort': {'lookup.name': 1}}
]
self.cmp.compare.aggregate(pipeline)
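    # As a $group accumulator, $mergeObjects combines the grouped documents into
    # one: later documents overwrite earlier keys, and documents that resolve to
    # null are ignored.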
def test__aggregate_merge_objects(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'_id': ObjectId(),
'a': '1', 'b': {'c': '1', 'd': 2}},
{'_id': ObjectId(),
'a': '1', 'b': {'e': 3, 'f': '4'}},
{'_id': ObjectId(),
'a': '1', 'c': '2'},
{'_id': ObjectId(),
'a': '1', 'b': None},
{'_id': ObjectId(),
'a': 2, 'b': None},
{'_id': ObjectId(),
'a': 2, 'b': {'c': None, 'd': 6}},
{'_id': ObjectId(),
'a': 2, 'b': {'c': '7', 'd': None, 'e': 9, 'f': '10'}},
{'_id': ObjectId(),
'a': 3, 'b': None},
{'_id': ObjectId(),
'a': 3, 'b': dict()},
{'_id': ObjectId(),
'a': 4, 'b': None},
])
pipeline = [
{'$group': {
'_id': '$a',
'merged_b': {'$mergeObjects': '$b'},
}}
]
self.cmp.compare_ignore_order.aggregate(pipeline)
@skipIf(not helpers.HAVE_PYMONGO, 'pymongo not installed')
class MongoClientGraphLookupTest(_CollectionComparisonTest):
def setUp(self):
super(MongoClientGraphLookupTest, self).setUp()
self.cmp_a = self._create_compare_for_collection('data_a')
self.cmp_b = self._create_compare_for_collection('data_b')
def test_graphlookup_basic(self):
data_a = [
{'_id': 0, 'airport': 'JFK', 'connects': ['BOS', 'ORD']},
{'_id': 1, 'airport': 'BOS', 'connects': ['JFK', 'PWM']},
{'_id': 2, 'airport': 'ORD', 'connects': ['JFK']},
{'_id': 3, 'airport': 'PWM', 'connects': ['BOS', 'LHR']},
{'_id': 4, 'airport': 'LHR', 'connects': ['PWM']},
]
data_b = [
{'_id': 1, 'name': 'Dev', 'nearestAirport': 'JFK'},
{'_id': 2, 'name': 'Eliot', 'nearestAirport': 'JFK'},
{'_id': 3, 'name': 'Jeff', 'nearestAirport': 'BOS'},
]
query = [
{
'$graphLookup': {
'from': 'a',
'startWith': '$nearestAirport',
'connectFromField': 'connects',
'connectToField': 'airport',
'maxDepth': 2,
'depthField': 'numConnections',
'as': 'destinations'
}
}
]
self.cmp_a.do.insert_many(data_a)
self.cmp_b.do.insert_many(data_b)
self.cmp_b.compare.aggregate(query)
def test_graphlookup_nested_array(self):
data_a = [
{'_id': 0, 'airport': 'JFK', 'connects': [
{'to': 'BOS', 'distance': 200}, {'to': 'ORD', 'distance': 800}]},
{'_id': 1, 'airport': 'BOS', 'connects': [
{'to': 'JFK', 'distance': 200}, {'to': 'PWM', 'distance': 2000}]},
{'_id': 2, 'airport': 'ORD', 'connects': [{'to': 'JFK', 'distance': 800}]},
{'_id': 3, 'airport': 'PWM', 'connects': [
{'to': 'BOS', 'distance': 2000}, {'to': 'LHR', 'distance': 6000}]},
{'_id': 4, 'airport': 'LHR', 'connects': [{'to': 'PWM', 'distance': 6000}]},
]
data_b = [
{'_id': 1, 'name': 'Dev', 'nearestAirport': 'JFK'},
{'_id': 2, 'name': 'Eliot', 'nearestAirport': 'JFK'},
{'_id': 3, 'name': 'Jeff', 'nearestAirport': 'BOS'},
]
query = [
{
'$graphLookup': {
'from': 'a',
'startWith': '$nearestAirport',
'connectFromField': 'connects.to',
'connectToField': 'airport',
'maxDepth': 2,
'depthField': 'numConnections',
'as': 'destinations'
}
}
]
self.cmp_a.do.insert_many(data_a)
self.cmp_b.do.insert_many(data_b)
self.cmp_b.compare.aggregate(query)
def test_graphlookup_nested_dict(self):
data_b = [
{'_id': 1, 'name': 'Dev'},
{'_id': 2, 'name': 'Eliot', 'reportsTo': {
'name': 'Dev', 'from': '2016-01-01T00:00:00.000Z'}},
{'_id': 3, 'name': 'Ron', 'reportsTo': {'name': 'Eliot',
'from': '2016-01-01T00:00:00.000Z'}},
{'_id': 4, 'name': 'Andrew', 'reportsTo': {
'name': 'Eliot', 'from': '2016-01-01T00:00:00.000Z'}},
{'_id': 5, 'name': 'Asya', 'reportsTo': {
'name': 'Ron', 'from': '2016-01-01T00:00:00.000Z'}},
{'_id': 6, 'name': 'Dan', 'reportsTo': {'name': 'Andrew',
'from': '2016-01-01T00:00:00.000Z'}},
]
data_a = [{'_id': 1, 'name': 'x'}]
query = [
{
'$graphLookup': {
'from': 'b',
'startWith': '$name',
'connectFromField': 'reportsTo.name',
'connectToField': 'name',
'as': 'reportingHierarchy'
}
}
]
self.cmp_a.do.insert_many(data_a)
self.cmp_b.do.insert_many(data_b)
self.cmp_b.compare.aggregate(query)
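    # $let binds the names declared in 'vars' (referenced as $$name) for use in
    # the required 'in' expression; 'vars' must be a document, which the error
    # cases below exercise.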
def test__aggregate_let(self):
self.cmp.do.insert_many([
{'_id': 1, 'price': 10, 'tax': 0.50, 'applyDiscount': True},
{'_id': 2, 'price': 10, 'tax': 0.25, 'applyDiscount': False},
])
self.cmp.compare.aggregate([{'$project': {
'finalTotal': {
'$let': {
'vars': {
'total': {'$add': ['$price', '$tax']},
'discounted': {'$cond': {'if': '$applyDiscount', 'then': 0.9, 'else': 1}},
},
'in': {'$multiply': ['$$total', '$$discounted']},
},
},
}}])
def test__aggregate_let_errors(self):
self.cmp.do.insert_many([
{'_id': 1, 'price': 10, 'tax': 0.50, 'applyDiscount': True},
{'_id': 2, 'price': 10, 'tax': 0.25, 'applyDiscount': False},
])
self.cmp.compare_exceptions.aggregate([{'$project': {
'finalTotal': {
'$let': [{'total': 3}, {'$$total'}],
},
}}])
self.cmp.compare_exceptions.aggregate([{'$project': {
'finalTotal': {
'$let': {
'in': {'$multiply': ['4', '3']},
},
},
}}])
self.cmp.compare_exceptions.aggregate([{'$project': {
'finalTotal': {
'$let': {
'vars': ['total', 'discounted'],
'in': {'$multiply': ['$$total', '$$discounted']},
},
},
}}])
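# Cursor helpers for MongoClientSortSkipLimitTest below: each one is (or builds) a
# callable that the comparison harness applies to the cursor from each backend,
# e.g. `self.cmp.compare(_SORT('index', 1), _SKIP(10)).find()`.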
def _LIMIT(*args):
return lambda cursor: cursor.limit(*args)
def _SORT(*args):
return lambda cursor: cursor.sort(*args)
def _COUNT(cursor):
return cursor.count()
def _COUNT_EXCEPTION_TYPE(cursor):
try:
cursor.count()
except Exception as error:
return str(type(error))
assert False, 'Count should have failed'
def _DISTINCT(*args):
def sortkey(value):
if isinstance(value, dict):
return [(k, sortkey(v)) for k, v in sorted(value.items())]
return value
return lambda cursor: sorted(cursor.distinct(*args), key=sortkey)
def _SKIP(*args):
return lambda cursor: cursor.skip(*args)
class MongoClientSortSkipLimitTest(_CollectionComparisonTest):
def setUp(self):
super(MongoClientSortSkipLimitTest, self).setUp()
self.cmp.do.insert_many([{'_id': i, 'index': i} for i in range(30)])
def test__skip(self):
self.cmp.compare(_SORT('index', 1), _SKIP(10)).find()
def test__skipped_find(self):
self.cmp.compare(_SORT('index', 1)).find(skip=10)
def test__limit(self):
self.cmp.compare(_SORT('index', 1), _LIMIT(10)).find()
def test__negative_limit(self):
self.cmp.compare(_SORT('index', 1), _LIMIT(-10)).find()
def test__skip_and_limit(self):
self.cmp.compare(_SORT('index', 1), _SKIP(10), _LIMIT(10)).find()
@skipIf(
helpers.PYMONGO_VERSION >= version.parse('4.0'),
'Cursor.count was removed in pymongo 4')
def test__count(self):
self.cmp.compare(_COUNT).find()
@skipUnless(
helpers.PYMONGO_VERSION >= version.parse('4.0'),
'Cursor.count was removed in pymongo 4')
def test__count_fail(self):
self.cmp.compare(_COUNT_EXCEPTION_TYPE).find()
def test__sort_name(self):
self.cmp.do.delete_many({})
for data in ({'a': 1, 'b': 3, 'c': 'data1'},
{'a': 2, 'b': 2, 'c': 'data3'},
{'a': 3, 'b': 1, 'c': 'data2'}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT('a')).find()
self.cmp.compare(_SORT('b')).find()
def test__sort_name_nested_doc(self):
self.cmp.do.delete_many({})
for data in ({'root': {'a': 1, 'b': 3, 'c': 'data1'}},
{'root': {'a': 2, 'b': 2, 'c': 'data3'}},
{'root': {'a': 3, 'b': 1, 'c': 'data2'}}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT('root.a')).find()
self.cmp.compare(_SORT('root.b')).find()
def test__sort_name_nested_list(self):
self.cmp.do.delete_many({})
for data in ({'root': [{'a': 1, 'b': 3, 'c': 'data1'}]},
{'root': [{'a': 2, 'b': 2, 'c': 'data3'}]},
{'root': [{'a': 3, 'b': 1, 'c': 'data2'}]}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT('root.0.a')).find()
self.cmp.compare(_SORT('root.0.b')).find()
def test__sort_list(self):
self.cmp.do.delete_many({})
for data in ({'a': 1, 'b': 3, 'c': 'data1'},
{'a': 2, 'b': 2, 'c': 'data3'},
{'a': 3, 'b': 1, 'c': 'data2'}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT([('a', 1), ('b', -1)])).find()
self.cmp.compare(_SORT([('b', 1), ('a', -1)])).find()
self.cmp.compare(_SORT([('b', 1), ('a', -1), ('c', 1)])).find()
def test__sort_list_nested_doc(self):
self.cmp.do.delete_many({})
for data in ({'root': {'a': 1, 'b': 3, 'c': 'data1'}},
{'root': {'a': 2, 'b': 2, 'c': 'data3'}},
{'root': {'a': 3, 'b': 1, 'c': 'data2'}}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT([('root.a', 1), ('root.b', -1)])).find()
self.cmp.compare(_SORT([('root.b', 1), ('root.a', -1)])).find()
self.cmp.compare(
_SORT([('root.b', 1), ('root.a', -1), ('root.c', 1)])).find()
def test__sort_list_nested_list(self):
self.cmp.do.delete_many({})
for data in ({'root': [{'a': 1, 'b': 3, 'c': 'data1'}]},
{'root': [{'a': 2, 'b': 2, 'c': 'data3'}]},
{'root': [{'a': 3, 'b': 1, 'c': 'data2'}]}):
self.cmp.do.insert_one(data)
self.cmp.compare(_SORT([('root.0.a', 1), ('root.0.b', -1)])).find()
self.cmp.compare(_SORT([('root.0.b', 1), ('root.0.a', -1)])).find()
self.cmp.compare(
_SORT(
[('root.0.b', 1), ('root.0.a', -1),
('root.0.c', 1)])).find()
def test__sort_dict(self):
self.cmp.do.delete_many({})
self.cmp.do.insert_many([
{'a': 1, 'b': OrderedDict([('value', 1), ('other', True)])},
{'a': 2, 'b': OrderedDict([('value', 3)])},
{'a': 3, 'b': OrderedDict([('value', 2), ('other', False)])},
])
self.cmp.compare(_SORT('b')).find()
def test__close(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do(lambda cursor: cursor.close()).find()
def test__distinct_nested_field(self):
self.cmp.do.insert_one({'f1': {'f2': 'v'}})
self.cmp.compare(_DISTINCT('f1.f2')).find()
def test__distinct_array_field(self):
self.cmp.do.insert_many(
[{'f1': ['v1', 'v2', 'v1']}, {'f1': ['v2', 'v3']}])
self.cmp.compare(_DISTINCT('f1')).find()
def test__distinct_array_nested_field(self):
self.cmp.do.insert_one({'f1': [{'f2': 'v'}, {'f2': 'w'}]})
self.cmp.compare(_DISTINCT('f1.f2')).find()
def test__distinct_array_field_with_dicts(self):
self.cmp.do.insert_many([
{'f1': [{'f2': 'v2'}, {'f3': 'v3'}]},
{'f1': [{'f3': 'v3'}, {'f4': 'v4'}]},
])
self.cmp.compare(_DISTINCT('f1')).find()
class InsertedDocumentTest(TestCase):
def setUp(self):
super(InsertedDocumentTest, self).setUp()
self.collection = mongomock.MongoClient().db.collection
self.data = {'a': 1, 'b': [1, 2, 3], 'c': {'d': 4}}
self.orig_data = copy.deepcopy(self.data)
self.object_id = self.collection.insert_one(self.data).inserted_id
def test__object_is_consistent(self):
[object] = self.collection.find()
self.assertEqual(object['_id'], self.object_id)
def test__find_by_id(self):
[object] = self.collection.find({'_id': self.object_id})
self.assertEqual(object, self.data)
@skipIf(
helpers.PYMONGO_VERSION and helpers.PYMONGO_VERSION >= version.parse('4.0'),
'remove was removed in pymongo v4')
def test__remove_by_id(self):
self.collection.remove(self.object_id)
self.assertEqual(0, self.collection.count_documents({}))
def test__inserting_changes_argument(self):
# Like pymongo, we should fill the _id in the inserted dict
# (odd behavior, but we need to stick to it)
self.assertEqual(self.data, dict(self.orig_data, _id=self.object_id))
def test__data_is_copied(self):
[object] = self.collection.find()
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
self.data.pop('a')
self.data['b'].append(5)
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
[object] = self.collection.find()
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
def test__find_returns_copied_object(self):
[object1] = self.collection.find()
[object2] = self.collection.find()
self.assertEqual(object1, object2)
self.assertIsNot(object1, object2)
object1['b'].append('bla')
self.assertNotEqual(object1, object2)
class ObjectIdTest(TestCase):
def test__equal_with_same_id(self):
obj1 = ObjectId()
obj2 = ObjectId(str(obj1))
self.assertEqual(obj1, obj2)
class MongoClientTest(_CollectionComparisonTest):
"""Compares a fake connection with the real mongo connection implementation
This is done via cross-comparison of the results.
"""
def setUp(self):
super(MongoClientTest, self).setUp()
self.cmp = MultiCollection({'fake': self.fake_conn, 'real': self.mongo_conn})
def test__database_names(self):
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.database_names()
return
self.cmp.do.database_names()
class DatabaseTest(_CollectionComparisonTest):
"""Compares a fake database with the real mongo database implementation
This is done via cross-comparison of the results.
"""
def setUp(self):
super(DatabaseTest, self).setUp()
self.cmp = MultiCollection({
'fake': self.fake_conn[self.db_name],
'real': self.mongo_conn[self.db_name],
})
def test__database_names(self):
if helpers.PYMONGO_VERSION >= version.parse('4.0'):
self.cmp.compare_exceptions.collection_names()
return
self.cmp.do.collection_names()
| 38.757153
| 99
| 0.494968
|
226d489cafe507067cd1d3e7718b9f5cdd849a04
| 3,985
|
py
|
Python
|
code/DeepDG/utils/util.py
|
HXWAndCL/transferlearning
|
362326cc3d375320a480f89527d14878f7224392
|
[
"MIT"
] | 3
|
2021-11-04T01:20:32.000Z
|
2022-03-22T01:50:29.000Z
|
code/DeepDG/utils/util.py
|
FuSiry/transferlearning
|
04133d73df12ad1ae771f8c12d07b50531437131
|
[
"MIT"
] | null | null | null |
code/DeepDG/utils/util.py
|
FuSiry/transferlearning
|
04133d73df12ad1ae771f8c12d07b50531437131
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import random
import numpy as np
import torch
import sys
import os
import torchvision
import PIL
def set_random_seed(seed=0):
# seed setting
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def save_checkpoint(filename, alg, args):
save_dict = {
"args": vars(args),
"model_dict": alg.cpu().state_dict()
}
torch.save(save_dict, os.path.join(args.output, filename))
def train_valid_target_eval_names(args):
eval_name_dict = {'train': [], 'valid': [], 'target': []}
t = 0
for i in range(args.domain_num):
if i not in args.test_envs:
eval_name_dict['train'].append(t)
t += 1
for i in range(args.domain_num):
if i not in args.test_envs:
eval_name_dict['valid'].append(t)
else:
eval_name_dict['target'].append(t)
t += 1
return eval_name_dict
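# Illustrative only (not in the original file): with 4 domains and domain 0 held
# out, the loader list is ordered train splits first, then the remaining splits,
# so indices 0-2 are train, 3 is the held-out target and 4-6 are validation.
def _demo_eval_split():
    import argparse
    demo_args = argparse.Namespace(domain_num=4, test_envs=[0])
    return train_valid_target_eval_names(demo_args)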
def alg_loss_dict(args):
loss_dict = {'CORAL': ['class', 'coral', 'total'],
'DANN': ['class', 'dis', 'total'],
'ERM': ['class'],
'Mixup': ['class'],
'MLDG': ['total'],
'MMD': ['class', 'mmd', 'total'],
                 'GroupDRO': ['group'],
'RSC': ['class']}
return loss_dict[args.algorithm]
def print_args(args, print_list):
s = "==========================================\n"
l = len(print_list)
for arg, content in args.__dict__.items():
if l == 0 or arg in print_list:
s += "{}:{}\n".format(arg, content)
return s
def print_environ():
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
class Tee:
def __init__(self, fname, mode="a"):
self.stdout = sys.stdout
self.file = open(fname, mode)
def write(self, message):
self.stdout.write(message)
self.file.write(message)
self.flush()
def flush(self):
self.stdout.flush()
self.file.flush()
def img_param_init(args):
dataset = args.dataset
if dataset == 'office':
domains = ['amazon', 'dslr', 'webcam']
elif dataset == 'office-caltech':
domains = ['amazon', 'dslr', 'webcam', 'caltech']
elif dataset == 'office-home':
domains = ['Art', 'Clipart', 'Product', 'Real_World']
elif dataset == 'dg5':
domains = ['mnist', 'mnist_m', 'svhn', 'syn', 'usps']
elif dataset == 'PACS':
domains = ['art_painting', 'cartoon', 'photo', 'sketch']
elif dataset == 'VLCS':
domains = ['Caltech101', 'LabelMe', 'SUN09', 'VOC2007']
else:
print('No such dataset exists!')
args.domains = domains
args.img_dataset = {
'office': ['amazon', 'dslr', 'webcam'],
'office-caltech': ['amazon', 'dslr', 'webcam', 'caltech'],
'office-home': ['Art', 'Clipart', 'Product', 'Real_World'],
'PACS': ['art_painting', 'cartoon', 'photo', 'sketch'],
'dg5': ['mnist', 'mnist_m', 'svhn', 'syn', 'usps'],
'VLCS': ['Caltech101', 'LabelMe', 'SUN09', 'VOC2007']
}
if dataset == 'dg5':
args.input_shape = (3, 32, 32)
args.num_classes = 10
else:
args.input_shape = (3, 224, 224)
if args.dataset == 'office-home':
args.num_classes = 65
elif args.dataset == 'office':
args.num_classes = 31
elif args.dataset == 'PACS':
args.num_classes = 7
elif args.dataset == 'VLCS':
args.num_classes = 5
return args
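# Minimal usage sketch (illustrative, not part of the original file): seed the
# run and fill in dataset-dependent fields on a stand-in args object.
if __name__ == '__main__':
    import argparse
    set_random_seed(0)
    demo_args = argparse.Namespace(dataset='PACS')
    demo_args = img_param_init(demo_args)
    print(demo_args.domains, demo_args.num_classes, demo_args.input_shape)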
| 30.419847
| 67
| 0.555834
|
7e422fdc11f80189c7041a45555c07cc87bede1d
| 12,279
|
py
|
Python
|
official/nlp/keras_nlp/layers/transformer_encoder_block_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | 2
|
2021-04-02T12:21:35.000Z
|
2021-12-14T07:29:38.000Z
|
official/nlp/keras_nlp/layers/transformer_encoder_block_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/keras_nlp/layers/transformer_encoder_block_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | 3
|
2019-11-12T11:18:11.000Z
|
2021-12-29T09:14:37.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.keras_nlp.layers.transformer_encoder_block import TransformerEncoderBlock
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
('base', TransformerEncoderBlock))
class TransformerEncoderBlockLayerTest(keras_parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTest, self).tearDown()
tf.keras.mixed_precision.experimental.set_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
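# Illustrative sketch (not part of the original tests): constructing the block
# directly and running an eager forward pass outside the parameterized harness.
def _demo_forward_pass():
  layer = TransformerEncoderBlock(
      num_attention_heads=2, inner_dim=64, inner_activation='relu')
  data = tf.random.uniform([2, 6, 16])  # (batch, sequence_length, width)
  return layer(data)  # output keeps the input shape, as asserted above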
@keras_parameterized.run_all_keras_modes
class TransformerArgumentTest(keras_parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = TransformerEncoderBlock.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
if __name__ == '__main__':
tf.test.main()
| 40.65894
| 101
| 0.714879
|
4ae168c50b0f1ef04a141fbbe3fb4dcf751dcd19
| 1,873
|
py
|
Python
|
babel/sierra_adapters/webpac_scraper.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null |
babel/sierra_adapters/webpac_scraper.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | 125
|
2017-10-12T12:14:23.000Z
|
2022-03-11T23:50:19.000Z
|
babel/sierra_adapters/webpac_scraper.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null |
# adapter for web scraping classic III WebPac
import logging
import re
from bs4 import BeautifulSoup
import requests
from requests.exceptions import RequestException, Timeout
from errors import BabelError
BPL_SEARCH_URL = 'https://iii.brooklynpubliclibrary.org/search/i'
NYPL_SEARCH_URL = 'https://catalog.nypl.org/search/i'
TIME_OUT = 15 # seconds
mlogger = logging.getLogger('babel')
def get_html(system_id, keyword):
"""
retrieves html code from given url
args:
url: str
returns:
html: bytes
"""
if system_id == 1:
url = f'{BPL_SEARCH_URL}{keyword}'
elif system_id == 2:
url = f'{NYPL_SEARCH_URL}{keyword}'
if keyword:
headers = {'user-agent': 'BookOps/Babel'}
try:
response = requests.get(url, headers=headers, timeout=TIME_OUT)
mlogger.debug(
f'WebPAC scraper request: {response.url}, '
f'response code: {response.status_code}')
if response.status_code == requests.codes.ok:
return response.content
except Timeout:
mlogger.error('WebPAC scraper timed out.')
raise BabelError(
'Request for a page timed out. Terminating.')
except RequestException:
pass
def catalog_match(system_id, keyword):
"""
Scrapes html page of OPAC of the keyword search
args:
system_id: int, datastore system did
keyword: str, ISBN or UPC
returns: bool, False if no matches found or True if dup present
"""
html = get_html(system_id, keyword)
if html is not None:
soup = BeautifulSoup(html, 'html.parser')
res = soup.body.find('td', text=re.compile('^No matches found.*'))
if res:
return False
else:
return True
else:
return False
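# Illustrative usage (not part of the original module); the ISBN below is a
# placeholder and the call performs a live WebPAC request.
if __name__ == '__main__':
    print(catalog_match(1, '9780316769488'))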
| 24.644737
| 75
| 0.616658
|
fb31c2a13e98ace5ef855a9c04cec3f3bd134758
| 1,949
|
py
|
Python
|
test/test_workflow_control_task_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
test/test_workflow_control_task_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
test/test_workflow_control_task_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.workflow_control_task_ref import WorkflowControlTaskRef # noqa: E501
from intersight.rest import ApiException
class TestWorkflowControlTaskRef(unittest.TestCase):
"""WorkflowControlTaskRef unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWorkflowControlTaskRef(self):
"""Test WorkflowControlTaskRef"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.workflow_control_task_ref.WorkflowControlTaskRef() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 51.289474
| 1,052
| 0.783992
|
a274d887c9ff946384b6b61c07c37581b1a78e1e
| 1,430
|
py
|
Python
|
aliyun-python-sdk-ocr/aliyunsdkocr/request/v20191230/RecognizePoiNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-ocr/aliyunsdkocr/request/v20191230/RecognizePoiNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-ocr/aliyunsdkocr/request/v20191230/RecognizePoiNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkocr.endpoint import endpoint_data
class RecognizePoiNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ocr', '2019-12-30', 'RecognizePoiName','ocr')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ImageURL(self):
return self.get_body_params().get('ImageURL')
def set_ImageURL(self,ImageURL):
self.add_body_params('ImageURL', ImageURL)
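# Hypothetical usage sketch (not part of the SDK file): dispatching the request
# through the core AcsClient; credentials, region and image URL are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = RecognizePoiNameRequest()
    request.set_ImageURL('https://example.com/signboard.jpg')
    print(client.do_action_with_exception(request))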
| 37.631579
| 75
| 0.765734
|
fcb3f55531b4a530783ee0acc22ad9e9bf1afbfb
| 6,695
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/cyanobiumspcaciam14.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/cyanobiumspcaciam14.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/cyanobiumspcaciam14.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Cyanobium sp. CACIAM14.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:14:21.430667
The undirected graph Cyanobium sp. CACIAM14 has 2895 nodes and 227162 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05423 and has 16 connected components, where the component with most
nodes has 2856 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 135, the mean node degree is 156.93, and
the node degree mode is 2. The top 5 most central nodes are 1496688.ER33_05200
(degree 1044), 1496688.ER33_08250 (degree 873), 1496688.ER33_13715 (degree
838), 1496688.ER33_00215 (degree 806) and 1496688.ER33_08200 (degree 755).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import CyanobiumSpCaciam14
# Then load the graph
graph = CyanobiumSpCaciam14()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def CyanobiumSpCaciam14(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Cyanobium sp. CACIAM14 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Cyanobium sp. CACIAM14 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:14:21.430667
The undirected graph Cyanobium sp. CACIAM14 has 2895 nodes and 227162 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05423 and has 16 connected components, where the component with most
nodes has 2856 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 135, the mean node degree is 156.93, and
the node degree mode is 2. The top 5 most central nodes are 1496688.ER33_05200
(degree 1044), 1496688.ER33_08250 (degree 873), 1496688.ER33_13715 (degree
838), 1496688.ER33_00215 (degree 806) and 1496688.ER33_08200 (degree 755).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import CyanobiumSpCaciam14
# Then load the graph
graph = CyanobiumSpCaciam14()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="CyanobiumSpCaciam14",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.42328
| 223
| 0.702614
|
f87943a1caa87568b4208e7d195dbf43476741ef
| 208
|
py
|
Python
|
maro/data_lib/__init__.py
|
VinayaSathyanarayana/maro
|
0ba55f36d89c235ef3af04efbac78b3885d8695d
|
[
"MIT"
] | 1
|
2020-09-30T09:31:05.000Z
|
2020-09-30T09:31:05.000Z
|
maro/data_lib/__init__.py
|
VinayaSathyanarayana/maro
|
0ba55f36d89c235ef3af04efbac78b3885d8695d
|
[
"MIT"
] | null | null | null |
maro/data_lib/__init__.py
|
VinayaSathyanarayana/maro
|
0ba55f36d89c235ef3af04efbac78b3885d8695d
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .binary_reader import BinaryReader
from .binary_converter import BinaryConverter
__all__ = ["BinaryReader", "BinaryConverter"]
| 23.111111
| 45
| 0.798077
|
2622046c0bd2c5364a5fb7364a94f68a6cc03293
| 1,152
|
py
|
Python
|
Python/simple_pyramid_blending.py
|
avishayzanbar/EDR
|
104d6deec7a15726b1e79f1fb3f16034f6b09af1
|
[
"Apache-2.0"
] | 4
|
2017-11-16T10:31:55.000Z
|
2020-11-08T16:05:09.000Z
|
Python/simple_pyramid_blending.py
|
avishayzanbar/EDR
|
104d6deec7a15726b1e79f1fb3f16034f6b09af1
|
[
"Apache-2.0"
] | null | null | null |
Python/simple_pyramid_blending.py
|
avishayzanbar/EDR
|
104d6deec7a15726b1e79f1fb3f16034f6b09af1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from reflect_image import reflect_image
from gaussian_pyramid import gaussian_pyramid
from laplacian_pyramid import laplacian_pyramid
from pyramid_blend import expand_level
def simple_pyramid_blending(A, B, R, N):
[rows, cols, _] = A.shape
reflected_A = reflect_image(A, rows, cols)
reflected_B = reflect_image(B, rows, cols)
reflected_R = reflect_image(R, rows, cols)
gauss_pyramid_A = gaussian_pyramid(reflected_A, N)
gauss_pyramid_B = gaussian_pyramid(reflected_B, N)
gauss_pyramid_R = gaussian_pyramid(reflected_R, N)
laplace_pyramid_A = laplacian_pyramid(gauss_pyramid_A, N)
laplace_pyramid_B = laplacian_pyramid(gauss_pyramid_B, N)
blended_pyramid = []
for idx in range(N):
blended_pyramid.append(np.multiply(laplace_pyramid_A[idx], (1 - gauss_pyramid_R[idx][..., None])) +
np.multiply(laplace_pyramid_B[idx], (gauss_pyramid_R[idx][..., None])))
blended_image = blended_pyramid[-1]
for i in range(N - 2, -1, -1):
blended_image = expand_level(blended_image, blended_pyramid[i])
return blended_image[:rows, :cols, :]
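# Illustrative sketch (not in the original file): blending two constant images
# with a half-and-half mask; the imported helper modules are assumed available.
if __name__ == '__main__':
    A = np.zeros((256, 256, 3))
    B = np.ones((256, 256, 3))
    R = np.zeros((256, 256))
    R[:, 128:] = 1.0  # right half taken from B
    blended = simple_pyramid_blending(A, B, R, N=4)
    print(blended.shape)  # expected: (256, 256, 3)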
| 34.909091
| 107
| 0.716146
|
99e75e112373299a3e8becc077374b5bbb6e0b78
| 5,149
|
py
|
Python
|
tests/test_getRTMSDataSvcSHTrade.py
|
nhlsm/PyDataGoKr
|
93521ee648da4727b98279db9ed4b1d9ed928972
|
[
"Apache-2.0"
] | 2
|
2021-04-26T05:20:56.000Z
|
2021-05-30T03:30:41.000Z
|
tests/test_getRTMSDataSvcSHTrade.py
|
nhlsm/PyDataGoKr
|
93521ee648da4727b98279db9ed4b1d9ed928972
|
[
"Apache-2.0"
] | null | null | null |
tests/test_getRTMSDataSvcSHTrade.py
|
nhlsm/PyDataGoKr
|
93521ee648da4727b98279db9ed4b1d9ed928972
|
[
"Apache-2.0"
] | null | null | null |
import logging
import sys
import pprint
import enum
import typing
import unittest
from collections import OrderedDict
import requests
import xmltodict
import pandas as pd
import data_go_kr
from data_go_kr import getRTMSDataSvcSHTrade as svc
from data_go_kr.utils.dong_code import *
class Test0(unittest.TestCase):
"""
    Tests that each keyword search returns the expected record count
"""
@classmethod
def setUpClass(cls):
# debug
LOG_FORMAT = '%(pathname)s:%(lineno)03d - %(message)s'
# LOG_LEVEL = logging.DEBUG # DEBUG(10), INFO(20), (0~50)
LOG_LEVEL = logging.INFO # DEBUG(10), INFO(20), (0~50)
logging.basicConfig(format=LOG_FORMAT, level=LOG_LEVEL, stream=sys.stdout)
global SVC_KEY
SVC_KEY = data_go_kr.test_svc_key()
def test_rsp_44(self):
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='11110', DEAL_YMD='201512')
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
# logging.info('\n%s', pprint.pformat(rsp_content))
CNT = 44
self.assertEqual(rsp_content.totalCount(), CNT)
self.assertEqual(len(rsp_content.itemDictList()), CNT)
self.assertEqual(len(rsp_content.itemDataFrame()), CNT)
def test_rsp_127(self):
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='50110', DEAL_YMD='201601')
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
# logging.info('\n%s', pprint.pformat(rsp_content))
CNT = 127
self.assertEqual(rsp_content.totalCount(), CNT)
self.assertEqual(len(rsp_content.itemDictList()), CNT)
self.assertEqual(len(rsp_content.itemDataFrame()), CNT)
# logging.info('\n%s', rsp_content.itemDataFrame() )
def test_rsp_0(self):
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='1111z', DEAL_YMD='201512')
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
# logging.info('\n%s', pprint.pformat(rsp_content))
CNT = 0
self.assertEqual(rsp_content.totalCount(), CNT)
self.assertEqual(len(rsp_content.itemDictList()), CNT)
self.assertEqual(len(rsp_content.itemDataFrame()), CNT)
def test_rsp_1(self):
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='42790', DEAL_YMD='200601')
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
# logging.info('\n%s', pprint.pformat(rsp_content))
CNT = 1
self.assertEqual(rsp_content.totalCount(), CNT)
self.assertEqual(len(rsp_content.itemDictList()), CNT)
self.assertEqual(len(rsp_content.itemDataFrame()), CNT)
def test_rsp_2(self):
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='42790', DEAL_YMD='201602')
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
# logging.info('\n%s', pprint.pformat(rsp_content))
CNT = 2
self.assertEqual(rsp_content.totalCount(), CNT)
self.assertEqual(len(rsp_content.itemDictList()), CNT)
self.assertEqual(len(rsp_content.itemDataFrame()), CNT)
def _test_rsp_loop(self):
# lawd_df = lawd_05('o')
# logging.info('key: %s', lawd_df['법정동코드'] )
# r = pd.date_range(start='20000101', end='20200801', freq='M')
r = pd.date_range(start='20060101', end='20061201', freq='M')
# r = pd.date_range(start='20160101', end='20161201', freq='M')
lst = r.format(formatter=lambda x: x.strftime('%Y%m'))
# logging.info('key: %s', type(lst) )
# logging.info('key: %s', lst )
for yyyymm in lst:
# rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='11110', DEAL_YMD=yyyymm) # 서울 종로구
# rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='28177', DEAL_YMD=yyyymm) # 인천 미추홀
# rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='50110', DEAL_YMD=yyyymm) # 제주 제주시
rsp = svc.get_rsp(serviceKey=SVC_KEY, LAWD_CD='42790', DEAL_YMD=yyyymm) # 강원도 화천군
# logging.info('code: %s', rsp.status_code)
# logging.info('hdr : %s', rsp.headers)
# logging.info('cont: %s', rsp.content)
# self.assertEqual(rsp.status_code, 200)
rsp_content = svc.RspContent.fromRsp(rsp)
logging.info('%s: %s', yyyymm, rsp_content.totalCount() )
| 39.007576
| 94
| 0.630608
|
701a0c2770c94977e1b55709cc0697eb36641e25
| 7,060
|
py
|
Python
|
service/monitoringnode.py
|
gabriellsesam/statuspage
|
bd3f381bda18c2f7e45554ba05a859a812ee17ab
|
[
"Apache-2.0"
] | null | null | null |
service/monitoringnode.py
|
gabriellsesam/statuspage
|
bd3f381bda18c2f7e45554ba05a859a812ee17ab
|
[
"Apache-2.0"
] | null | null | null |
service/monitoringnode.py
|
gabriellsesam/statuspage
|
bd3f381bda18c2f7e45554ba05a859a812ee17ab
|
[
"Apache-2.0"
] | 1
|
2020-05-19T08:44:16.000Z
|
2020-05-19T08:44:16.000Z
|
#!/usr/bin/env python3
import datetime
import json
import logging
import os
import sys
from emailsender import Emailsender
from vault_client import VaultClient
import requests
__author__ = "Ravish Ranjan"
required_env_vars = ["SESAM_API_URL", "PAGE_ID", "COMPONENT_ID"]
optional_env_vars = ["GROUP_ID"]
email_env_vars = ["RECIPIENTS", "SMTP_HOST", "SMTP_USERNAME", "SMTP_SENDER"]
status_page_base_url = 'https://api.statuspage.io/v1'
load_vars_from_vault = ["api_key", "jwt", "smtp_password"]
class AppConfig(object):
pass
config = AppConfig()
hashivault = VaultClient()
# load variables
missing_env_vars = list()
for env_var in load_vars_from_vault:
value = hashivault.ensure_has_value(f'sesam-extensions/kv/sesam-monitoring/{env_var}')
if not value:
missing_env_vars.append(env_var)
setattr(config, env_var, value)
for env_var in required_env_vars:
value = os.getenv(env_var)
if not value:
missing_env_vars.append(env_var)
setattr(config, env_var, value)
for env_var in optional_env_vars:
value = os.getenv(env_var)
if value:
setattr(config, env_var, value)
EmailFunctionality = False
missing_email_env_vars = []
for env_var in email_env_vars:
    value = os.getenv(env_var)
    if value:
        if env_var == 'RECIPIENTS':
            setattr(config, 'RECIPIENTS', json.loads(value))  # parse the JSON string into a list
        else:
            setattr(config, env_var, value)
        EmailFunctionality = True
    elif not value and EmailFunctionality:
        missing_email_env_vars.append(env_var)
# set logging
log_level = logging.getLevelName(os.environ.get('LOG_LEVEL', 'DEBUG'))  # default log level = DEBUG
logging.basicConfig(level=log_level)
logging.debug(datetime.datetime.now())
logging.debug(f"SESAM instance name: {config.SESAM_API_URL}")
def get_sesam_node_status():
try:
response = requests.get(url=config.SESAM_API_URL + "/api/health",
headers={'Authorization': 'bearer ' + config.jwt})
if response.status_code == 200:
return 'OK'
else:
logging.error(f"Non 200 status code from the Sesam api, got: {response.status_code}")
return 'NOT OK'
except requests.ConnectionError as e:
logging.error(f"Issue while connecting the SESAM Health api {e}.")
return 'NOT OK'
def get_node_type():
try:
response = requests.get(url=config.SESAM_API_URL + '/api/datasets/config:aggregator-storage-node',
headers={'Authorization': 'bearer ' + config.jwt})
if response.status_code == 200:
return 'MULTI'
else:
return 'SINGLE'
    except Exception as e:
        raise Exception('Problem connecting to sesam node') from e
def get_subnodes_status(subnodes):
try:
with requests.session() as session:
session.headers = {'Authorization': 'bearer ' + config.jwt}
problematic_subnodes = []
for s in subnodes:
try:
response = session.get(url=f"{config.SESAM_API_URL}/_/{s}/api/health", timeout=180)
if response.status_code != 200:
problematic_subnodes.append(s)
except Exception as e:
problematic_subnodes.append(s)
problematic_subnodes.sort()
return problematic_subnodes
except Exception as e:
logging.error(f"issue when creating connection to check subnodes status{e}")
raise e
def get_subnodes_from_dataset():
response = requests.get(
url=config.SESAM_API_URL + '/api/datasets/config:aggregator-storage-node/entities?deleted=false&history=false',
headers={'Authorization': 'bearer ' + config.jwt})
subnodes = ['microservices']
for e in json.loads(response.content):
if not e['type'].startswith('system:') and e['type'] != 'metadata':
subnodes.append(e['_id'])
return subnodes
def get_sesam_subnodes_status():
try:
dataset_subnodes = get_subnodes_from_dataset()
if not dataset_subnodes:#Safeguard: If functions returned None
return 'NOT OK'
faulty_subnodes = get_subnodes_status(dataset_subnodes)#Get status of problematic subnodes
if len(faulty_subnodes) != 0:
logging.error(f"Problem with subnodes: {faulty_subnodes}")#Future maybe post this to dataset for emailing.
return faulty_subnodes
else:
return 'OK'
except Exception as e:
logging.error(f"Catched error: {e}")
return 'NOT OK'
def update_status_page(status_data):
if hasattr(config, 'GROUP_ID'):
payload = {'component': {'status': status_data, 'group_id': config.GROUP_ID}}
else:
payload = {'component': {'status': status_data}}
json_data = json.dumps(payload)
try:
response = requests.patch(url=status_page_base_url + '/pages/' + config.PAGE_ID + '/components/' +
config.COMPONENT_ID, data=json_data,
headers={'Accept': 'application/json', 'Authorization': config.api_key})
if response.ok:
logging.info(f"OK, the status page has been updated successfully for component_id : {config.COMPONENT_ID}")
else:
logging.error(f"Some issue while updating the status page : {response.text}")
except requests.ConnectionError:
logging.error(f"Issue while connecting the status page api")
def prepare_payload(status_data):
if status_data is not None:
if status_data == 'OK':
status_data = 'operational'
else:
try:
if EmailFunctionality:
sender = Emailsender(config.SMTP_HOST, config.SMTP_USERNAME, config.smtp_password,
config.SMTP_SENDER)
logging.info(sender.sendMail(config.RECIPIENTS, 'Problems with node {}'.format(config.SESAM_API_URL), status_data))
except Exception as e:
logging.error('Failed to send email because of {}'.format(e))
status_data = 'major_outage'
update_status_page(status_data)
if __name__ == '__main__':
if len(missing_env_vars) != 0:
logging.error(f"Missing the following required environment variable(s) {missing_env_vars}")
sys.exit(1)
    elif config.SESAM_API_URL.endswith('/api'):  # backwards compatibility
setattr(config, 'SESAM_API_URL', config.SESAM_API_URL[0:-4])
if EmailFunctionality and len(missing_email_env_vars) != 0:
logging.error("Some email variables set but not all! Missing: ".format(missing_email_env_vars))
try:
if get_node_type() == 'MULTI':
prepare_payload(get_sesam_subnodes_status())
else:
prepare_payload(get_sesam_node_status())
except Exception as e:
logging.error(f"Issue getting node type, {e}")
prepare_payload('Issue getting node type')
sys.exit(0)
| 35.477387
| 135
| 0.643343
|
814af10c0d6d65c98ab3f92ae2e4e8a7130f73f0
| 3,114
|
py
|
Python
|
exercises/fit_gaussian_estimators.py
|
baruchis123/IML.HUJI
|
a88be10dbb83072233913082b54df849983f500a
|
[
"MIT"
] | null | null | null |
exercises/fit_gaussian_estimators.py
|
baruchis123/IML.HUJI
|
a88be10dbb83072233913082b54df849983f500a
|
[
"MIT"
] | null | null | null |
exercises/fit_gaussian_estimators.py
|
baruchis123/IML.HUJI
|
a88be10dbb83072233913082b54df849983f500a
|
[
"MIT"
] | null | null | null |
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
batch = np.random.normal(10, 1, (1000, ))
gaussian_estimator = UnivariateGaussian()
gaussian_estimator.fit(batch)
print((gaussian_estimator.mu_, gaussian_estimator.var_))
# Question 2 - Empirically showing sample mean is consistent
X = np.linspace(0, 1000, 100)
mean_samples = []
for i in range(1, 101):
gaussian_estimator.fit(batch[:i*10])
mean_samples.append(gaussian_estimator.mu_)
Y = abs(np.array(mean_samples) - 10)
    go.Figure([go.Scatter(x=X, y=Y, mode='lines', name=r'$\text{Distance}$')],
layout=go.Layout(title=r"$\text{Absolute Distance between Expectation and Mean of Sample per Sample Size}$",
xaxis_title=r"$\text{Sample Size}$",
yaxis_title=r"$\text{Distance between Expectation and Mean}$",
height=500)).show()
# Question 3 - Plotting Empirical PDF of fitted model
Y_pdf = gaussian_estimator.pdf(batch)
    go.Figure([go.Scatter(x=batch, y=Y_pdf, mode='markers', name=r'$\text{Gaussian PDF}$')],
layout=go.Layout(title=r"$\text{Gaussian Pdf per Sample Drawn}$",
xaxis_title=r"$\text{Samples}$",
yaxis_title=r"$\text{PDF}$",
height=500)).show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
cov = np.array([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]])
batch = np.random.multivariate_normal(np.array([0, 0, 4, 0]), cov, (1000, ))
multi_variate_gaussian_estimator = MultivariateGaussian()
multi_variate_gaussian_estimator.fit(batch)
print(multi_variate_gaussian_estimator.mu_)
print(multi_variate_gaussian_estimator.cov_)
# Question 5 - Likelihood evaluation
f1 = np.linspace(-10, 10, 200)
f3 = np.linspace(-10, 10, 200)
likelihood_samples = []
for i in f1:
f3_likelihood_samples = []
for j in f3:
mu = np.array([i, 0, j, 0])
likelihood = MultivariateGaussian.log_likelihood(mu, cov, batch)
f3_likelihood_samples.append(likelihood)
likelihood_samples.append(f3_likelihood_samples)
likelihood_matrix = np.array(likelihood_samples)
fig = px.imshow(likelihood_matrix,
labels=dict(x="f1", y="f3", color="Log Likelihood"),
x=f1,
y=f3)
fig.show()
# Question 6 - Maximum likelihood
max_val = np.max(likelihood_matrix)
max_index = np.unravel_index(np.argmax(likelihood_matrix), likelihood_matrix.shape)
print(max_val.round(4), f1[max_index[0]].round(4), f3[max_index[1]].round(4))
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
| 40.441558
| 118
| 0.641618
|
4e9855f14b5657edbd9c26b911ca946e2389f81f
| 6,909
|
py
|
Python
|
Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/req/req_uninstall.py
|
i2tResearch/Ciberseguridad_web
|
ac3dd934a60628532e3538369cb145d9a8f33e4f
|
[
"MIT"
] | 9
|
2021-10-01T22:02:58.000Z
|
2021-11-09T17:48:45.000Z
|
Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/req/req_uninstall.py
|
i2tResearch/Ciberseguridad_web
|
ac3dd934a60628532e3538369cb145d9a8f33e4f
|
[
"MIT"
] | null | null | null |
Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/req/req_uninstall.py
|
i2tResearch/Ciberseguridad_web
|
ac3dd934a60628532e3538369cb145d9a8f33e4f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll backend %s; was not uninstalled",
self.dist.project_name,
)
return False
        logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
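# Illustrative check (not part of pip): compact() keeps only the shortest covering
# paths, mirroring its docstring example; it never touches ``self``, so it can be
# exercised without constructing a full UninstallPathSet.
def _demo_compact():
    paths = {'/a/path/', '/a/path/to/a/file.txt', '/b/other.txt'}
    # On POSIX this returns {'/a/path/', '/b/other.txt'}.
    return UninstallPathSet.compact(None, paths)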
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
                'Cannot roll back changes to %s, none were made', self.file
)
return False
        logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
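# Illustrative sketch (not part of pip): exercising UninstallPthEntries against a
# throwaway .pth file; the path entry below is a placeholder.
if __name__ == '__main__':
    fd, pth_path = tempfile.mkstemp(suffix='.pth')
    os.close(fd)
    with open(pth_path, 'w') as fh:
        fh.write('/tmp/example-package\n')
    pth = UninstallPthEntries(pth_path)
    pth.add('/tmp/example-package')
    pth.remove()    # strips the entry from the file
    pth.rollback()  # restores the saved contents
    os.remove(pth_path)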
| 35.25 | 79 | 0.558402 |
| c130487451ae0ac400ad51279bc26a92b1abfe69 | 490 | py | Python | recipes/libnl/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | ["MIT"] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/libnl/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | ["MIT"] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/libnl/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | ["MIT"] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z |
import os.path
from conans import ConanFile, CMake, tools
class NetlinkTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
bin_path = os.path.join(self.build_folder, "bin", "show_links")
self.run(bin_path, cwd=self.source_folder, run_environment=True)
| 25.789474 | 76 | 0.640816 |
| ee91c4195335014b61e3f949ceb66013360a80db | 698 | py | Python | tests/unit/notice_fetcher/test_fake_ted_api.py | meaningfy-ws/ted-xml-2-rdf | ac26a19f3761b7cf79d79a46be6323b658f067eb | ["Apache-2.0"] | 1 | 2022-03-21T12:32:52.000Z | 2022-03-21T12:32:52.000Z | tests/unit/notice_fetcher/test_fake_ted_api.py | meaningfy-ws/ted-xml-2-rdf | ac26a19f3761b7cf79d79a46be6323b658f067eb | ["Apache-2.0"] | 24 | 2022-02-10T10:43:56.000Z | 2022-03-29T12:36:21.000Z | tests/unit/notice_fetcher/test_fake_ted_api.py | meaningfy-ws/ted-sws | d1e351eacb2900f84ec7edc457e49d8202fbaff5 | ["Apache-2.0"] | null | null | null |
import datetime
from tests.fakes.fake_ted_api import FakeTedApiAdapter
def test_fake_ted_api():
fake_document_search = FakeTedApiAdapter()
get_by_date = fake_document_search.get_by_range_date(start_date=datetime.date(2020, 1, 1),
end_date=datetime.date(2020, 1, 2))
get_by_query = fake_document_search.get_by_query(query={"q": "PD=[]"})
assert isinstance(get_by_date, list)
assert len(get_by_date) == 1
assert len(get_by_query) == 1
assert isinstance(get_by_query, list)
assert fake_document_search.get_by_id(document_id="ID")
assert isinstance(fake_document_search.get_by_id(document_id="ID"), dict)
| 38.777778 | 94 | 0.702006 |
| 51831978d8d6b549a7dc1d2900745118dc39faec | 11,024 | py | Python | core/domain/search_services.py | bching/oppia | 9e9b6d756859b8bc1e46f88a1be8736f8398a8d8 | ["Apache-2.0"] | 1 | 2017-11-30T02:16:01.000Z | 2017-11-30T02:16:01.000Z | core/domain/search_services.py | bching/oppia | 9e9b6d756859b8bc1e46f88a1be8736f8398a8d8 | ["Apache-2.0"] | null | null | null | core/domain/search_services.py | bching/oppia | 9e9b6d756859b8bc1e46f88a1be8736f8398a8d8 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for operating on the search status of activities."""
from core.domain import rights_manager
from core.platform import models
search_services = models.Registry.import_search_services()
# Name for the exploration search index.
SEARCH_INDEX_EXPLORATIONS = 'explorations'
# Name for the collection search index.
SEARCH_INDEX_COLLECTIONS = 'collections'
# This is done to prevent the rank hitting 0 too easily. Note that
# negative ranks are disallowed in the Search API.
_DEFAULT_RANK = 20
def index_exploration_summaries(exp_summaries):
"""Adds the explorations to the search index.
Args:
exp_summaries: list(ExpSummaryModel). List of Exp Summary domain
objects to be indexed.
"""
search_services.add_documents_to_index([
_exp_summary_to_search_dict(exp_summary)
for exp_summary in exp_summaries
if _should_index_exploration(exp_summary)
], SEARCH_INDEX_EXPLORATIONS)
def _exp_summary_to_search_dict(exp_summary):
"""Updates the dict to be returned, whether the given exploration is to
be indexed for further queries or not.
Args:
exp_summary: ExpSummaryModel. ExplorationSummary domain object.
Returns:
dict. The representation of the given exploration, in a form that can
be used by the search index.
"""
doc = {
'id': exp_summary.id,
'language_code': exp_summary.language_code,
'title': exp_summary.title,
'category': exp_summary.category,
'tags': exp_summary.tags,
'objective': exp_summary.objective,
'rank': get_search_rank_from_exp_summary(exp_summary),
}
return doc
def _should_index_exploration(exp_summary):
"""Returns whether the given exploration should be indexed for future
search queries.
Args:
exp_summary: ExpSummaryModel. ExplorationSummary domain object.
"""
rights = rights_manager.get_exploration_rights(exp_summary.id)
return rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE
def get_search_rank_from_exp_summary(exp_summary):
"""Returns an integer determining the document's rank in search.
Featured explorations get a ranking bump, and so do explorations that
have been more recently updated. Good ratings will increase the ranking
and bad ones will lower it.
Args:
exp_summary: ExplorationSummary. ExplorationSummary domain object.
Returns:
int. Document's rank in search.
"""
# TODO(sll): Improve this calculation.
rating_weightings = {'1': -5, '2': -2, '3': 2, '4': 5, '5': 10}
rank = _DEFAULT_RANK
if exp_summary.ratings:
for rating_value in exp_summary.ratings:
rank += (
exp_summary.ratings[rating_value] *
rating_weightings[rating_value])
# Ranks must be non-negative.
return max(rank, 0)
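# Illustrative sketch, not from the original Oppia module: a worked example of
# the rank formula above, using a hypothetical stand-in for the summary object.
# rank = 20 (default) + 2 ratings of '5' (2 * 10) + 1 rating of '1' (1 * -5) = 35.
def _demo_search_rank():
    class _FakeExpSummary(object):
        ratings = {'1': 1, '2': 0, '3': 0, '4': 0, '5': 2}
    assert get_search_rank_from_exp_summary(_FakeExpSummary()) == 35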
def index_collection_summaries(collection_summaries):
"""Adds the collections to the search index.
Args:
collection_summaries: list(CollectionSummaryModel). List of
Collection Summary domain objects to be indexed.
"""
search_services.add_documents_to_index([
_collection_summary_to_search_dict(collection_summary)
for collection_summary in collection_summaries
if _should_index_collection(collection_summary)
], SEARCH_INDEX_COLLECTIONS)
def update_exploration_status_in_search(exp_id):
"""Updates the exploration status in its search doc.
Args:
exp_id: str. The id of the exploration whose status is to be
updated.
"""
rights = rights_manager.get_exploration_rights(exp_id)
if rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
delete_explorations_from_search_index([exp_id])
else:
patch_exploration_search_document(rights.id, {})
def _collection_summary_to_search_dict(collection_summary):
"""Converts a collection domain object to a search dict.
Args:
collection_summary: CollectionSummaryModel. The collection
summary object to be converted.
Returns:
dict. The search dict of the collection domain object.
"""
doc = {
'id': collection_summary.id,
'title': collection_summary.title,
'category': collection_summary.category,
'objective': collection_summary.objective,
'language_code': collection_summary.language_code,
'tags': collection_summary.tags,
'rank': _DEFAULT_RANK,
}
return doc
def _should_index_collection(collection):
"""Checks if a particular collection should be indexed.
Args:
collection: CollectionSummaryModel.
"""
rights = rights_manager.get_collection_rights(collection.id)
return rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
Args:
        query: str or None. The query string to search for.
limit: int. The maximum number of results to return.
sort: str. A string indicating how to sort results. This should be a
string of space separated values. Each value should start with a
'+' or a '-' character indicating whether to sort in ascending or
descending order respectively. This character should be followed by
            a field name to sort on. When this is None, results are based on
            'rank'. See get_search_rank_from_exp_summary for how the rank is
            determined.
cursor: str or None. A cursor, used to get the next page of results. If
there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
Returns:
tuple. A 2-tuple consisting of:
- list(str). A list of exploration ids that match the query.
- str or None. A cursor if there are more matching explorations to
fetch, None otherwise. If a cursor is returned, it will be a
web-safe string that can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True)
def delete_explorations_from_search_index(exploration_ids):
"""Deletes the documents corresponding to these exploration_ids from the
search index.
Args:
exploration_ids: list(str). A list of exploration ids whose
documents are to be deleted from the search index.
"""
search_services.delete_documents_from_index(
exploration_ids, SEARCH_INDEX_EXPLORATIONS)
def patch_exploration_search_document(exp_id, update):
"""Patches an exploration's current search document, with the values
from the 'update' dictionary.
Args:
exp_id: str. The id of the exploration to be patched.
update: dict. Key-value pairs to patch the exploration's search
document with.
"""
doc = search_services.get_document_from_index(
exp_id, SEARCH_INDEX_EXPLORATIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
def clear_exploration_search_index():
"""WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_EXPLORATIONS)
def search_collections(query, limit, sort=None, cursor=None):
"""Searches through the available collections.
Args:
        query: str or None. The query string to search for.
        limit: int. The maximum number of results to return.
sort: str. This indicates how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field
name to sort on. When this is None, results are returned based on
their ranking (which is currently set to the same default value
for all collections).
cursor: str or None. A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
Returns:
A 2-tuple with the following elements:
- A list of collection ids that match the query.
- A cursor if there are more matching collections to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string
that can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_COLLECTIONS, cursor, limit, sort, ids_only=True)
def delete_collections_from_search_index(collection_ids):
"""Removes the given collections from the search index.
Args:
collection_ids: list(str). List of IDs of the collections to be removed
from the search index.
"""
search_services.delete_documents_from_index(
collection_ids, SEARCH_INDEX_COLLECTIONS)
def patch_collection_search_document(collection_id, update):
"""Patches an collection's current search document, with the values
from the 'update' dictionary.
Args:
collection_id: str. ID of the collection to be patched.
update: dict. Key-value pairs to patch the current search document with.
"""
doc = search_services.get_document_from_index(
collection_id, SEARCH_INDEX_COLLECTIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_COLLECTIONS)
def clear_collection_search_index():
"""Clears the search index.
WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_COLLECTIONS)
def update_collection_status_in_search(collection_id):
"""Updates the status field of a collection in the search index.
Args:
collection_id: str. ID of the collection.
"""
rights = rights_manager.get_collection_rights(collection_id)
if rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
delete_collections_from_search_index([collection_id])
else:
patch_collection_search_document(rights.id, {})
| 36.503311 | 80 | 0.706005 |
| ba13b7033d1f16869f1b1de451a5f06cf1e2db5f | 13,342 | py | Python | optimization.py | h4ste/pronto | 5815aefc2a1fb0e909631b719193acaa09d58457 | ["MIT"] | 2 | 2020-03-03T20:51:09.000Z | 2020-07-21T03:47:11.000Z | optimization.py | h4ste/pronto | 5815aefc2a1fb0e909631b719193acaa09d58457 | ["MIT"] | 1 | 2020-07-22T10:05:41.000Z | 2020-07-23T02:40:08.000Z | optimization.py | h4ste/pronto | 5815aefc2a1fb0e909631b719193acaa09d58457 | ["MIT"] | null | null | null |
import re
from typing import Union
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, control_flow_ops
import modeling
class PRONTOOptimizer(object):
def __init__(self, model, sparse, learning_rate):
"""
Creates a new PRONTOOptimizer responsible for optimizing PRONTO. Allegedly, some day I will get around to
looking at other optimization strategies (e.g., sequence optimization).
:param model: a PRONTOModel object
:type model: modeling.PRONTOModel
:param sparse: whether to use sparse softmax or not
:type sparse: bool
:param learning_rate: learning rate of the optimizer
:type learning_rate: float
"""
self.model = model
# If sparse calculate sparse softmax directly from integer labels
if sparse:
self.loss = tf.losses.sparse_softmax_cross_entropy(model.labels, model.logits)
# If not sparse, convert labels to one hots and do softmax
else:
y_true = tf.one_hot(model.labels, model.num_classes, name='labels_onehot')
self.loss = tf.losses.softmax_cross_entropy(y_true, model.logits)
# Global step used for coordinating summarizes and checkpointing
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# Training operation: fetch this to run a step of the adam optimizer!
self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.loss, self.global_step)
class BERTOptimizer(object):
def __init__(self, model, num_train_steps, num_warmup_steps=None, init_lr=1e-3,
lr_decay=True, clip_norm=1.0):
"""
        Creates a new BERTOptimizer responsible for optimizing PRONTO with BERT-style
        learning-rate warm-up, linear decay, and decoupled weight decay.
:param model: a PRONTOModel object
:type model: modeling.PRONTOModel
:param num_train_steps: number of training steps between decay steps
:type num_train_steps: int
:param num_warmup_steps: number of training steps before starting decay
:type num_warmup_steps: int
:param init_lr: initial learning rate of the optimizer
:type init_lr: float
"""
self.model = model
# Global step used for coordinating summarizes and checkpointing
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
name='global_step') # type: Union[int, tf.Variable]
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of learning rate
if lr_decay:
learning_rate = polynomial_decay(
learning_rate,
self.global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False
)
# Implements linear warm up. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(self.global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.1,
beta_1=.9,
beta_2=0.999,
epsilon=1e-7,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
self.loss = tf.losses.sparse_softmax_cross_entropy(model.labels, model.logits)
tvars = tf.trainable_variables()
grads = tf.gradients(self.loss, tvars)
if clip_norm:
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step)
new_global_step = self.global_step + 1
self.train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])
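# Illustrative sketch, not part of the original module: the warm-up blend above,
# rewritten in plain Python (no TensorFlow) so the schedule can be checked by
# hand. All argument values below are hypothetical.
def _demo_warmup_schedule(global_step, num_train_steps, num_warmup_steps, init_lr):
    # Linear decay toward 0 over num_train_steps (power=1, end_learning_rate=0).
    decayed = init_lr * max(0.0, 1.0 - float(global_step) / num_train_steps)
    if global_step < num_warmup_steps:
        # During warm-up the rate is global_step / num_warmup_steps * init_lr.
        return init_lr * float(global_step) / num_warmup_steps
    return decayed
# e.g. _demo_warmup_schedule(50, 1000, 100, 1e-3) returns 0.0005, half of init_lr
# at the midpoint of a 100-step warm-up.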
# noinspection PyAbstractClass
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
            # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
# noinspection PyMethodMayBeStatic
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
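# Illustrative sketch, not part of the original module: the comment in
# apply_gradients() explains that weight decay is applied to the parameter
# directly (decoupled from the Adam moments) rather than added to the loss as an
# L2 term. A scalar, plain-Python version of one update step with hypothetical
# hyper-parameter defaults:
def _demo_decoupled_weight_decay_step(param, grad, m, v, lr=1e-3, beta_1=0.9,
                                      beta_2=0.999, epsilon=1e-6,
                                      weight_decay_rate=0.01):
    next_m = beta_1 * m + (1.0 - beta_1) * grad
    next_v = beta_2 * v + (1.0 - beta_2) * grad * grad
    update = next_m / (next_v ** 0.5 + epsilon)
    update += weight_decay_rate * param  # decay the weight itself, not the loss
    return param - lr * update, next_m, next_v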
def polynomial_decay(learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training step.
  This function returns the decayed learning rate as a scalar `Tensor` of the
  same type as `learning_rate`. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
that is bigger than `global_steps`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
  decayed_learning_rate = (learning_rate - end_learning_rate) *
                          (1 - global_step / decay_steps) ^ (power) +
                          end_learning_rate
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.train.polynomial_decay(starter_learning_rate,
global_step, decay_steps,
end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate_fn)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
    A scalar `Tensor` of the same type as `learning_rate`: the decayed
    learning rate.
Raises:
ValueError: if `global_step` is not supplied.
"""
if global_step is None:
raise ValueError("global_step is required for polynomial_decay.")
with ops.name_scope(
name, "PolynomialDecay",
[learning_rate, global_step, decay_steps, end_learning_rate, power]
) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
end_learning_rate = math_ops.cast(end_learning_rate, dtype)
power = math_ops.cast(power, dtype)
global_step_recomp = math_ops.cast(global_step, dtype)
decay_steps_recomp = math_ops.cast(decay_steps, dtype)
if cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = control_flow_ops.cond(
math_ops.equal(global_step_recomp, 0), lambda: 1.0,
lambda: math_ops.ceil(global_step_recomp / decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
p = math_ops.div(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
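# Illustrative sketch, not part of the original module: the decay formula from
# the docstring above in plain Python, handy for checking values by hand without
# building a TensorFlow graph.
def _demo_polynomial_decay(learning_rate, global_step, decay_steps,
                           end_learning_rate=0.0001, power=1.0):
    global_step = min(global_step, decay_steps)
    fraction = 1.0 - float(global_step) / decay_steps
    return (learning_rate - end_learning_rate) * fraction ** power + end_learning_rate
# e.g. _demo_polynomial_decay(0.1, 5000, 10000, end_learning_rate=0.01, power=0.5)
# gives 0.01 + 0.09 * sqrt(0.5), roughly 0.0736, for the docstring's sqrt example.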
| 43.318182 | 113 | 0.621646 |
| 03e33703ddb5f9e10063a736833010407e6f494c | 9,755 | py | Python | minesweeper.py | RAMESSESII2/Minesweepwer_AI | 762563a9fb28ae1bbc489cf62882d9cae431387b | ["MIT"] | null | null | null | minesweeper.py | RAMESSESII2/Minesweepwer_AI | 762563a9fb28ae1bbc489cf62882d9cae431387b | ["MIT"] | null | null | null | minesweeper.py | RAMESSESII2/Minesweepwer_AI | 762563a9fb28ae1bbc489cf62882d9cae431387b | ["MIT"] | null | null | null |
import itertools
import random
class Minesweeper:
"""
Minesweeper game representation
"""
def __init__(self, height=8, width=8, mines=8):
# Set initial width, height, and number of mines
self.height = height
self.width = width
self.mines = set()
# Initialize an empty field with no mines
self.board = []
for i in range(self.height):
row = []
for j in range(self.width):
row.append(False)
self.board.append(row)
# Add mines randomly
while len(self.mines) != mines:
i = random.randrange(height)
j = random.randrange(width)
if not self.board[i][j]:
self.mines.add((i, j))
self.board[i][j] = True
# At first, player has found no mines
self.mines_found = set()
def print(self):
"""
Prints a text-based representation
of where mines are located.
"""
for i in range(self.height):
print("--" * self.width + "-")
for j in range(self.width):
if self.board[i][j]:
print("|X", end="")
else:
print("| ", end="")
print("|")
print("--" * self.width + "-")
def is_mine(self, cell):
i, j = cell
return self.board[i][j]
def nearby_mines(self, cell):
"""
Returns the number of mines that are
within one row and column of a given cell,
not including the cell itself.
"""
# Keep count of nearby mines
count = 0
# Loop over all cells within one row and column
for i in range(cell[0] - 1, cell[0] + 2):
for j in range(cell[1] - 1, cell[1] + 2):
# Ignore the cell itself
if (i, j) == cell:
continue
# Update count if cell in bounds and is mine
if 0 <= i < self.height and 0 <= j < self.width:
if self.board[i][j]:
count += 1
return count
def won(self):
"""
Checks if all mines have been flagged.
"""
return self.mines_found == self.mines
class Sentence:
"""
Logical statement about a Minesweeper game
A sentence consists of a set of board cells,
and a count of the number of those cells which are mines.
"""
def __init__(self, cells, count):
self.cells = set(cells)
self.count = count
def __eq__(self, other):
return self.cells == other.cells and self.count == other.count
def __str__(self):
return f"{self.cells} = {self.count}"
def known_mines(self):
"""
Returns the set of all cells in self.cells known to be mines.
"""
if self.count == len(self.cells):
return self.cells
else:
return set()
def known_safes(self):
"""
Returns the set of all cells in self.cells known to be safe.
"""
if self.count == 0:
return self.cells
else:
return set()
def mark_mine(self, cell):
"""
Updates internal knowledge representation given the fact that
a cell is known to be a mine.
"""
if cell in self.cells:
self.cells.remove(cell)
self.count -= 1
def mark_safe(self, cell):
"""
Updates internal knowledge representation given the fact that
a cell is known to be safe.
"""
if cell in self.cells:
self.cells.remove(cell)
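# Illustrative sketch, not part of the original game module: the two rules
# encoded above are "count == len(cells) means every cell is a mine" and
# "count == 0 means every cell is safe". A quick check with hypothetical cells:
def _demo_sentence_rules():
    all_mines = Sentence({(0, 0), (0, 1)}, 2)
    all_safe = Sentence({(1, 0), (1, 1)}, 0)
    assert all_mines.known_mines() == {(0, 0), (0, 1)}
    assert all_safe.known_safes() == {(1, 0), (1, 1)}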
class MinesweeperAI:
"""
Minesweeper game player
"""
def __init__(self, height=8, width=8):
# Set initial height and width
self.height = height
self.width = width
# Keep track of which cells have been clicked on
self.moves_made = set()
# Keep track of cells known to be safe or mines
self.mines = set()
self.safes = set()
# List of sentences about the game known to be true
self.knowledge = []
def mark_mine(self, cell):
"""
Marks a cell as a mine, and updates all knowledge
to mark that cell as a mine as well.
"""
self.mines.add(cell)
for sentence in self.knowledge:
sentence.mark_mine(cell)
def mark_safe(self, cell):
"""
Marks a cell as safe, and updates all knowledge
to mark that cell as safe as well.
"""
self.safes.add(cell)
for sentence in self.knowledge:
sentence.mark_safe(cell)
def add_knowledge(self, cell, count):
"""
Called when the Minesweeper board tells us, for a given
safe cell, how many neighboring cells have mines in them.
This function should:
1) mark the cell as a move that has been made
2) mark the cell as safe
3) add a new sentence to the AI's knowledge base
based on the value of `cell` and `count`
4) mark any additional cells as safe or as mines
if it can be concluded based on the AI's knowledge base
5) add any new sentences to the AI's knowledge base
if they can be inferred from existing knowledge
"""
# cells already explored
self.moves_made.add(cell)
# marks the cell safe
self.mark_safe(cell)
# find all the neighbours of a given cell
neighbours = self.find_neighbours(cell)
# remove cells from neighbours whose state is already known
to_remove = set()
n_count = count
for nb in neighbours:
if nb in self.mines:
to_remove.add(nb)
n_count -= 1
if nb in self.safes:
to_remove.add(nb)
neighbours -= to_remove
# new sentence to KB of AI
n_sentence = Sentence(neighbours, n_count)
self.knowledge.append(n_sentence)
        # mark mines or safes based on the conclusions of the AI's new KB
# Be careful not to modify a set while iterating over it. Doing so may result in errors!
mi = set()
sf = set()
for sentence in self.knowledge:
for mine in sentence.known_mines():
mi.add(mine)
for safe in sentence.known_safes():
sf.add(safe)
for mine in mi:
self.mark_mine(mine)
for safe in sf:
self.mark_safe(safe)
# remove empty sentences based on new knowledge
empty_sentence = []
for sentence in self.knowledge:
if sentence.cells == set():
empty_sentence.append(sentence)
for sentence in empty_sentence:
self.knowledge.remove(sentence)
# finally infer from the current KB
add_sentence = []
for sentence in self.knowledge:
if sentence.cells != n_sentence.cells and n_sentence.cells:
if sentence.cells.issubset(n_sentence.cells):
s = Sentence(
n_sentence.cells - sentence.cells,
n_sentence.count - sentence.count,
)
add_sentence.append(s)
if n_sentence.cells.issubset(sentence.cells):
s = Sentence(
sentence.cells - n_sentence.cells,
sentence.count - n_sentence.count,
)
add_sentence.append(s)
self.knowledge += add_sentence
print(cell)
for sentence in self.knowledge:
print(f"{sentence.cells} = {sentence.count}")
def find_neighbours(self, cell):
nbrs = set()
h = self.height - 1
w = self.width - 1
r, c = cell[0], cell[1]
# cells in the upper row
if r - 1 >= 0:
nbrs.add((r - 1, c))
if c - 1 >= 0:
nbrs.add((r - 1, c - 1))
if c + 1 <= w:
nbrs.add((r - 1, c + 1))
# cells in the same row
if c + 1 <= w:
nbrs.add((r, c + 1))
if c - 1 >= 0:
nbrs.add((r, c - 1))
# cells in the row one below
if r + 1 <= h:
nbrs.add((r + 1, c))
if c - 1 >= 0:
nbrs.add((r + 1, c - 1))
if c + 1 <= w:
nbrs.add((r + 1, c + 1))
return nbrs
def make_safe_move(self):
"""
Returns a safe cell to choose on the Minesweeper board.
The move must be known to be safe, and not already a move
that has been made.
This function may use the knowledge in self.mines, self.safes
and self.moves_made, but should not modify any of those values.
"""
for cell in self.safes:
if cell not in self.moves_made:
print("safe cell found", cell)
return cell
print("No safe cells found")
return None
def make_random_move(self):
"""
Returns a move to make on the Minesweeper board.
Should choose randomly among cells that:
1) have not already been chosen, and
2) are not known to be mines
"""
h = self.height
w = self.width
moves = []
for i in range(h):
for j in range(w):
if (i, j) not in self.moves_made and (i, j) not in self.mines:
moves.append((i, j))
if not moves:
return None
r_move = random.choice(moves)
return r_move
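# Illustrative sketch, not part of the original game module: the inference step
# in add_knowledge() uses the subset rule -- if set1 is a subset of set2, then
# (set2 - set1) contains exactly (count2 - count1) mines. With hypothetical cells:
def _demo_subset_inference():
    small = Sentence({(0, 0), (0, 1)}, 1)
    large = Sentence({(0, 0), (0, 1), (0, 2)}, 2)
    inferred = Sentence(large.cells - small.cells, large.count - small.count)
    # {(0, 2)} = 1, so (0, 2) must be a mine.
    assert inferred.known_mines() == {(0, 2)}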
| 30.201238 | 96 | 0.522194 |
| c074b5bc38991ece2b9f1bf9e062fc9690bc0393 | 8,056 | py | Python | backend-tests/tests/test_create_org.py | drewmoseley/integration | 37f6374eb5faa710d14861cf5ed82e8f9cf0b149 | ["Apache-2.0"] | null | null | null | backend-tests/tests/test_create_org.py | drewmoseley/integration | 37f6374eb5faa710d14861cf5ed82e8f9cf0b149 | ["Apache-2.0"] | 98 | 2020-09-21T06:00:11.000Z | 2022-03-28T01:17:19.000Z | backend-tests/tests/test_create_org.py | drewmoseley/integration | 37f6374eb5faa710d14861cf5ed82e8f9cf0b149 | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import time
import logging
import asyncore
from threading import Thread
from testutils.common import mongo, clean_mongo, randstr
from testutils.api.client import ApiClient
import testutils.api.useradm as useradm
import testutils.api.tenantadm as tenantadm
import testutils.api.deviceauth as deviceauth_v1
import testutils.integration.stripe as stripeutils
from testutils.infra.cli import CliTenantadm
@pytest.yield_fixture(scope="function")
def clean_migrated_mongo(clean_mongo):
tenantadm_cli = CliTenantadm()
tenantadm_cli.migrate()
yield clean_mongo
class TestCreateOrganizationEnterprise:
def _cleanup_stripe(self, tenant_email):
cust = stripeutils.customer_for_tenant(tenant_email)
stripeutils.delete_cust(cust["id"])
def test_success(self, clean_migrated_mongo):
tc = ApiClient(tenantadm.URL_MGMT)
uc = ApiClient(useradm.URL_MGMT)
tenantadmi = ApiClient(tenantadm.URL_INTERNAL)
devauthi = ApiClient(deviceauth_v1.URL_INTERNAL)
logging.info("Starting TestCreateOrganizationEnterprise")
tenant = "tenant{}".format(randstr())
email = "some.user@{}.com".format(tenant)
payload = {
"request_id": "123456",
"organization": tenant,
"email": email,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"token": "tok_visa",
}
r = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert r.status_code == 202
# Try log in every second for 3 minutes.
# - There usually is a slight delay (in order of ms) for propagating
# the created user to the db.
for i in range(3 * 60):
rsp = uc.call("POST", useradm.URL_LOGIN, auth=(email, "asdfqwer1234"),)
if rsp.status_code == 200:
break
time.sleep(1)
if rsp.status_code != 200:
raise ValueError(
"User could not log in within three minutes after organization has been created."
)
# get the tenant id (and verify that only one tenant exists)
r = tenantadmi.call("GET", tenantadm.URL_INTERNAL_TENANTS)
assert r.status_code == 200
api_tenants = r.json()
assert len(api_tenants) == 1
# verify the device limit via internal api
# the default plan is "os" so the device limit should be set to 50
r = devauthi.call(
"GET",
deviceauth_v1.URL_INTERNAL_LIMITS_MAX_DEVICES,
path_params={"tid": api_tenants[0]["id"]},
)
assert r.status_code == 200
assert r.json()["limit"] == 50
# verify there is a stripe customer with a correctly assigned source
cust = stripeutils.customer_for_tenant(email)
assert cust.default_source is not None
assert len(cust.sources) == 1
self._cleanup_stripe(email)
def test_success_with_plan(self, clean_migrated_mongo):
tc = ApiClient(tenantadm.URL_MGMT)
uc = ApiClient(useradm.URL_MGMT)
tenantadmi = ApiClient(tenantadm.URL_INTERNAL)
devauthi = ApiClient(deviceauth_v1.URL_INTERNAL)
logging.info("Starting TestCreateOrganizationEnterprise")
tenant = "tenant{}".format(randstr())
email = "some.user@{}.com".format(tenant)
payload = {
"request_id": "123456",
"organization": tenant,
"email": email,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"plan": "professional",
"token": "tok_visa",
}
r = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert r.status_code == 202
# Try log in every second for 5 minutes.
# Creating organization is an async job
# and may take some time to complete.
for i in range(5 * 60):
rsp = uc.call("POST", useradm.URL_LOGIN, auth=(email, "asdfqwer1234"),)
if rsp.status_code == 200:
break
time.sleep(1)
if rsp.status_code != 200:
raise ValueError(
"User could not log in within five minutes after organization has been created."
)
# get the tenant id (and verify that only one tenant exists)
r = tenantadmi.call("GET", tenantadm.URL_INTERNAL_TENANTS)
assert r.status_code == 200
api_tenants = r.json()
assert len(api_tenants) == 1
# verify the device limit via internal api
# the device limit for professional plan should be 250
r = devauthi.call(
"GET",
deviceauth_v1.URL_INTERNAL_LIMITS_MAX_DEVICES,
path_params={"tid": api_tenants[0]["id"]},
)
assert r.status_code == 200
assert r.json()["limit"] == 250
self._cleanup_stripe(email)
def test_duplicate_organization_name(self, clean_migrated_mongo):
tc = ApiClient(tenantadm.URL_MGMT)
tenant = "tenant{}".format(randstr())
email = "some.user@{}.com".format(tenant)
payload = {
"request_id": "123456",
"organization": tenant,
"email": email,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"token": "tok_visa",
}
rsp = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert rsp.status_code == 202
email2 = "some.user1@{}.com".format(tenant)
payload = {
"request_id": "123457",
"organization": tenant,
"email": email2,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"token": "tok_visa",
}
rsp = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert rsp.status_code == 202
self._cleanup_stripe(email)
self._cleanup_stripe(email2)
def test_duplicate_email(self, clean_migrated_mongo):
tc = ApiClient(tenantadm.URL_MGMT)
tenant = "tenant{}".format(randstr())
email = "some.user@{}.com".format(tenant)
payload = {
"request_id": "123456",
"organization": tenant,
"email": email,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"token": "tok_visa",
}
rsp = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert rsp.status_code == 202
tenant = "tenant{}".format(randstr())
payload = {
"request_id": "123457",
"organization": tenant,
"email": email,
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"token": "tok_visa",
}
rsp = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert rsp.status_code == 409
self._cleanup_stripe(email)
def test_plan_invalid(self, clean_migrated_mongo):
tc = ApiClient(tenantadm.URL_MGMT)
payload = {
"request_id": "123456",
"organization": "tenant-foo",
"email": "some.user@example.com",
"password": "asdfqwer1234",
"g-recaptcha-response": "foobar",
"plan": "foo",
"token": "tok_visa",
}
rsp = tc.post(tenantadm.URL_MGMT_TENANTS, data=payload)
assert rsp.status_code == 400
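# Illustrative sketch, not part of the original test suite: the tests above poll
# the login endpoint once per second because organization creation is handled
# asynchronously. The same wait-until pattern, factored into a small hypothetical
# helper (name, timeout and interval are illustrative, not from the suite):
def _wait_until(check, timeout_seconds, interval_seconds=1):
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        result = check()
        if result:
            return result
        time.sleep(interval_seconds)
    raise ValueError("condition not met within %d seconds" % timeout_seconds)
# Usage sketch: _wait_until(login_succeeded, 3 * 60), where login_succeeded is any
# callable returning a truthy value once the asynchronous job has finished.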
| 34.42735 | 97 | 0.605263 |
| 8f196ad18a1bda1f19622f260ce8a3095f82ac4f | 16,415 | py | Python | research/object_detection/builders/preprocessor_builder.py | KSomi/models | cc6c45ca6b701426d35bbbab104ad32a2e80a3cf | ["Apache-2.0"] | 25 | 2018-11-02T14:14:36.000Z | 2022-01-31T09:00:12.000Z | research/object_detection/builders/preprocessor_builder.py | KSomi/models | cc6c45ca6b701426d35bbbab104ad32a2e80a3cf | ["Apache-2.0"] | 12 | 2020-03-24T17:53:50.000Z | 2022-03-12T00:05:19.000Z | research/object_detection/builders/preprocessor_builder.py | KSomi/models | cc6c45ca6b701426d35bbbab104ad32a2e80a3cf | ["Apache-2.0"] | 13 | 2019-11-06T17:23:29.000Z | 2019-11-29T13:03:07.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
def _get_step_config_from_proto(preprocessor_step_config, step_name):
"""Returns the value of a field named step_name from proto.
Args:
preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
step_name: Name of the field to get value from.
Returns:
result_dict: a sub proto message from preprocessor_step_config which will be
later converted to a dictionary.
Raises:
ValueError: If field does not exist in proto.
"""
for field, value in preprocessor_step_config.ListFields():
if field.name == step_name:
return value
raise ValueError('Could not get field %s from proto!' % step_name)
def _get_dict_from_proto(config):
"""Helper function to put all proto fields into a dictionary.
  For many preprocessing steps, there's a trivial 1-1 mapping from proto fields
to function arguments. This function automatically populates a dictionary with
the arguments from the proto.
Protos that CANNOT be trivially populated include:
* nested messages.
* steps that check if an optional field is set (ie. where None != 0).
* protos that don't map 1-1 to arguments (ie. list should be reshaped).
* fields requiring additional validation (ie. repeated field has n elements).
Args:
config: A protobuf object that does not violate the conditions above.
Returns:
result_dict: |config| converted into a python dictionary.
"""
result_dict = {}
for field, value in config.ListFields():
result_dict[field.name] = value
return result_dict
# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = {
'normalize_image':
preprocessor.normalize_image,
'random_pixel_value_scale':
preprocessor.random_pixel_value_scale,
'random_image_scale':
preprocessor.random_image_scale,
'random_rgb_to_gray':
preprocessor.random_rgb_to_gray,
'random_adjust_brightness':
preprocessor.random_adjust_brightness,
'random_adjust_contrast':
preprocessor.random_adjust_contrast,
'random_adjust_hue':
preprocessor.random_adjust_hue,
'random_adjust_saturation':
preprocessor.random_adjust_saturation,
'random_distort_color':
preprocessor.random_distort_color,
'random_jitter_boxes':
preprocessor.random_jitter_boxes,
'random_crop_to_aspect_ratio':
preprocessor.random_crop_to_aspect_ratio,
'random_black_patches':
preprocessor.random_black_patches,
'rgb_to_gray':
preprocessor.rgb_to_gray,
'scale_boxes_to_pixel_coordinates': (
preprocessor.scale_boxes_to_pixel_coordinates),
'subtract_channel_mean':
preprocessor.subtract_channel_mean,
'convert_class_logits_to_softmax':
preprocessor.convert_class_logits_to_softmax,
}
# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof('preprocessing_step')
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config,
step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == 'random_horizontal_flip':
config = preprocessor_step_config.random_horizontal_flip
return (preprocessor.random_horizontal_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation),
})
if step_type == 'random_vertical_flip':
config = preprocessor_step_config.random_vertical_flip
return (preprocessor.random_vertical_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation),
})
if step_type == 'random_rotation90':
return (preprocessor.random_rotation90, {})
if step_type == 'random_crop_image':
config = preprocessor_step_config.random_crop_image
return (preprocessor.random_crop_image,
{
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
})
if step_type == 'random_pad_image':
config = preprocessor_step_config.random_pad_image
min_image_size = None
if (config.HasField('min_image_height') !=
config.HasField('min_image_width')):
raise ValueError('min_image_height and min_image_width should be either '
'both set or both unset.')
if config.HasField('min_image_height'):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if (config.HasField('max_image_height') !=
config.HasField('max_image_width')):
raise ValueError('max_image_height and max_image_width should be either '
'both set or both unset.')
if config.HasField('max_image_height'):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_pad_image,
{
'min_image_size': min_image_size,
'max_image_size': max_image_size,
'pad_color': pad_color,
})
if step_type == 'random_absolute_pad_image':
config = preprocessor_step_config.random_absolute_pad_image
max_height_padding = config.max_height_padding or 1
max_width_padding = config.max_width_padding or 1
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
return (preprocessor.random_absolute_pad_image,
{
'max_height_padding': max_height_padding,
'max_width_padding': max_width_padding,
'pad_color': pad_color,
})
if step_type == 'random_crop_pad_image':
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) != 3:
tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
kwargs = {
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
'pad_color': pad_color,
}
if min_padded_size_ratio:
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
if max_padded_size_ratio:
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
return (preprocessor.random_crop_pad_image, kwargs)
if step_type == 'random_resize_method':
config = preprocessor_step_config.random_resize_method
return (preprocessor.random_resize_method,
{
'target_size': [config.target_height, config.target_width],
})
if step_type == 'resize_image':
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (preprocessor.resize_image,
{
'new_height': config.new_height,
'new_width': config.new_width,
'method': method
})
if step_type == 'random_self_concat_image':
config = preprocessor_step_config.random_self_concat_image
return (preprocessor.random_self_concat_image, {
'concat_vertical_probability': config.concat_vertical_probability,
'concat_horizontal_probability': config.concat_horizontal_probability
})
if step_type == 'ssd_random_crop':
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop, {})
if step_type == 'autoaugment_image':
config = preprocessor_step_config.autoaugment_image
return (preprocessor.autoaugment_image, {
'policy_name': config.policy_name,
})
if step_type == 'drop_label_probabilistically':
config = preprocessor_step_config.drop_label_probabilistically
return (preprocessor.drop_label_probabilistically, {
'dropped_label': config.label,
'drop_probability': config.drop_probability,
})
if step_type == 'remap_labels':
config = preprocessor_step_config.remap_labels
return (preprocessor.remap_labels, {
'original_labels': config.original_labels,
'new_label': config.new_label
})
if step_type == 'ssd_random_crop_pad':
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
for op in config.operations]
max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
for op in config.operations]
pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations]
return (preprocessor.ssd_random_crop_pad,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
'min_padded_size_ratio': min_padded_size_ratio,
'max_padded_size_ratio': max_padded_size_ratio,
'pad_color': pad_color,
})
return (preprocessor.ssd_random_crop_pad, {})
if step_type == 'ssd_random_crop_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
'min_object_covered': min_object_covered,
'aspect_ratio': config.aspect_ratio,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
kwargs = {}
aspect_ratio = config.aspect_ratio
if aspect_ratio:
kwargs['aspect_ratio'] = aspect_ratio
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio:
if len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio:
if len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
if config.operations:
kwargs['min_object_covered'] = [op.min_object_covered
for op in config.operations]
kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
kwargs['area_range'] = [(op.min_area, op.max_area)
for op in config.operations]
kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations]
kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations]
kwargs['random_coef'] = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)
raise ValueError('Unknown preprocessing step.')
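# Illustrative usage sketch, not part of the original builder: build() returns a
# (function, kwargs) pair that callers later apply to the tensors in a dataset.
# The message and field names below are the ones this module already references
# (preprocessor_pb2.PreprocessingStep and its random_horizontal_flip step).
def _demo_build_usage():
    step = preprocessor_pb2.PreprocessingStep()
    step.random_horizontal_flip.SetInParent()  # select this step in the oneof
    preprocess_fn, kwargs = build(step)
    # preprocess_fn is preprocessor.random_horizontal_flip and kwargs holds the
    # (empty) keypoint_flip_permutation tuple parsed from the proto.
    return preprocess_fn, kwargs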
| 41.243719 | 80 | 0.687968 |
| 40b191ca527d2d3382e60f5c7ccde79f85951d0d | 17,917 | py | Python | tests/slack_sdk/web/classes/test_objects.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | ["MIT"] | null | null | null | tests/slack_sdk/web/classes/test_objects.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | ["MIT"] | null | null | null | tests/slack_sdk/web/classes/test_objects.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | ["MIT"] | null | null | null |
import copy
import unittest
from typing import Optional, List, Union
from slack_sdk.errors import SlackObjectFormationError
from slack_sdk.models import JsonObject, JsonValidator
from slack_sdk.models.blocks import (
ConfirmObject,
MarkdownTextObject,
Option,
OptionGroup,
PlainTextObject,
)
from slack_sdk.models.messages import (
ChannelLink,
DateLink,
EveryoneLink,
HereLink,
Link,
ObjectLink,
)
from . import STRING_301_CHARS, STRING_51_CHARS
class SimpleJsonObject(JsonObject):
attributes = {"some", "test", "keys"}
def __init__(self):
self.some = "this is"
self.test = "a test"
self.keys = "object"
@JsonValidator("some validation message")
def test_valid(self):
return len(self.test) <= 10
@JsonValidator("this should never fail")
def always_valid_test(self):
return True
class KeyValueObject(JsonObject):
attributes = {"name", "value"}
def __init__(
self, *, name: Optional[str] = None, value: Optional[str] = None,
):
self.name = name
self.value = value
class NestedObject(JsonObject):
attributes = {"initial", "options"}
def __init__(
self,
*,
initial: Union[dict, KeyValueObject],
options: List[Union[dict, KeyValueObject]],
):
self.initial = (
KeyValueObject(**initial) if isinstance(initial, dict) else initial
)
self.options = [
KeyValueObject(**o) if isinstance(o, dict) else o for o in options
]
class JsonObjectTests(unittest.TestCase):
def setUp(self) -> None:
self.good_test_object = SimpleJsonObject()
obj = SimpleJsonObject()
obj.test = STRING_51_CHARS
self.bad_test_object = obj
def test_json_formation(self):
self.assertDictEqual(
self.good_test_object.to_dict(),
{"some": "this is", "test": "a test", "keys": "object"},
)
def test_validate_json_fails(self):
with self.assertRaises(SlackObjectFormationError):
self.bad_test_object.validate_json()
def test_to_dict_performs_validation(self):
with self.assertRaises(SlackObjectFormationError):
self.bad_test_object.to_dict()
def test_get_non_null_attributes(self):
expected = {"name": "something"}
obj = KeyValueObject(name="something", value=None)
obj2 = copy.deepcopy(obj)
self.assertDictEqual(expected, obj.get_non_null_attributes())
self.assertEqual(str(obj2), str(obj))
def test_get_non_null_attributes_nested(self):
expected = {
"initial": {"name": "something"},
"options": [
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
}
obj1 = KeyValueObject(name="something", value=None)
obj2 = KeyValueObject(name="message", value="That's great!")
options = [obj1, obj2]
nested = NestedObject(initial=obj1, options=options)
self.assertEqual(type(obj1), KeyValueObject)
self.assertTrue(hasattr(obj1, "value"))
self.assertEqual(type(nested.initial), KeyValueObject)
self.assertEqual(type(options[0]), KeyValueObject)
self.assertTrue(hasattr(options[0], "value"))
self.assertEqual(type(nested.options[0]), KeyValueObject)
self.assertTrue(hasattr(nested.options[0], "value"))
dict_value = nested.get_non_null_attributes()
self.assertDictEqual(expected, dict_value)
self.assertEqual(type(obj1), KeyValueObject)
self.assertTrue(hasattr(obj1, "value"))
self.assertEqual(type(nested.initial), KeyValueObject)
self.assertEqual(type(options[0]), KeyValueObject)
self.assertTrue(hasattr(options[0], "value"))
self.assertEqual(type(nested.options[0]), KeyValueObject)
self.assertTrue(hasattr(nested.options[0], "value"))
def test_get_non_null_attributes_nested_2(self):
expected = {
"initial": {"name": "something"},
"options": [
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
}
nested = NestedObject(
initial={"name": "something"},
options=[
{"name": "something"},
{"name": "message", "value": "That's great!"},
],
)
self.assertDictEqual(expected, nested.get_non_null_attributes())
class JsonValidatorTests(unittest.TestCase):
def setUp(self) -> None:
self.validator_instance = JsonValidator("message")
self.class_instance = SimpleJsonObject()
def test_isolated_class(self):
def does_nothing():
return False
wrapped = self.validator_instance(does_nothing)
# noinspection PyUnresolvedReferences
self.assertTrue(wrapped.validator)
def test_wrapped_class(self):
for attribute in dir(self.class_instance):
attr = getattr(self.class_instance, attribute, None)
if attribute in ("test_valid", "always_valid_test"):
self.assertTrue(attr.validator)
else:
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
attr.validator
class LinkTests(unittest.TestCase):
def test_without_text(self):
link = Link(url="http://google.com", text="")
self.assertEqual(f"{link}", "<http://google.com>")
def test_with_text(self):
link = Link(url="http://google.com", text="google")
self.assertEqual(f"{link}", "<http://google.com|google>")
class DateLinkTests(unittest.TestCase):
def setUp(self) -> None:
self.epoch = 1234567890
def test_simple_formation(self):
datelink = DateLink(
date=self.epoch, date_format="{date_long}", fallback=f"{self.epoch}"
)
self.assertEqual(
f"{datelink}", f"<!date^{self.epoch}^{{date_long}}|{self.epoch}>"
)
def test_with_url(self):
datelink = DateLink(
date=self.epoch,
date_format="{date_long}",
link="http://google.com",
fallback=f"{self.epoch}",
)
self.assertEqual(
f"{datelink}",
f"<!date^{self.epoch}^{{date_long}}^http://google.com|{self.epoch}>",
)
class ObjectLinkTests(unittest.TestCase):
def test_channel(self):
objlink = ObjectLink(object_id="C12345")
self.assertEqual(f"{objlink}", "<#C12345>")
def test_group_message(self):
objlink = ObjectLink(object_id="G12345")
self.assertEqual(f"{objlink}", "<#G12345>")
def test_subteam_message(self):
objlink = ObjectLink(object_id="S12345")
self.assertEqual(f"{objlink}", "<!subteam^S12345>")
def test_with_label(self):
objlink = ObjectLink(object_id="C12345", text="abc")
self.assertEqual(f"{objlink}", "<#C12345|abc>")
def test_unknown_prefix(self):
objlink = ObjectLink(object_id="Z12345")
self.assertEqual(f"{objlink}", "<@Z12345>")
class SpecialLinkTests(unittest.TestCase):
def test_channel_link(self):
self.assertEqual(f"{ChannelLink()}", "<!channel|channel>")
def test_here_link(self):
self.assertEqual(f"{HereLink()}", "<!here|here>")
def test_everyone_link(self):
self.assertEqual(f"{EveryoneLink()}", "<!everyone|everyone>")
class PlainTextObjectTests(unittest.TestCase):
def test_basic_json(self):
self.assertDictEqual(
{"text": "some text", "type": "plain_text"},
PlainTextObject(text="some text").to_dict(),
)
self.assertDictEqual(
{"text": "some text", "emoji": False, "type": "plain_text"},
PlainTextObject(text="some text", emoji=False).to_dict(),
)
def test_from_string(self):
plaintext = PlainTextObject(text="some text", emoji=True)
self.assertDictEqual(
plaintext.to_dict(), PlainTextObject.direct_from_string("some text")
)
class MarkdownTextObjectTests(unittest.TestCase):
def test_basic_json(self):
self.assertDictEqual(
{"text": "some text", "type": "mrkdwn"},
MarkdownTextObject(text="some text").to_dict(),
)
self.assertDictEqual(
{"text": "some text", "verbatim": True, "type": "mrkdwn"},
MarkdownTextObject(text="some text", verbatim=True).to_dict(),
)
def test_from_string(self):
markdown = MarkdownTextObject(text="some text")
self.assertDictEqual(
markdown.to_dict(), MarkdownTextObject.direct_from_string("some text")
)
class ConfirmObjectTests(unittest.TestCase):
def test_basic_json(self):
expected = {
"confirm": {"emoji": True, "text": "Yes", "type": "plain_text"},
"deny": {"emoji": True, "text": "No", "type": "plain_text"},
"text": {"text": "are you sure?", "type": "mrkdwn"},
"title": {"emoji": True, "text": "some title", "type": "plain_text"},
}
simple_object = ConfirmObject(title="some title", text="are you sure?")
self.assertDictEqual(expected, simple_object.to_dict())
self.assertDictEqual(expected, simple_object.to_dict("block"))
self.assertDictEqual(
{
"text": "are you sure?",
"title": "some title",
"ok_text": "Okay",
"dismiss_text": "Cancel",
},
simple_object.to_dict("action"),
)
def test_confirm_overrides(self):
confirm = ConfirmObject(
title="some title",
text="are you sure?",
confirm="I'm really sure",
deny="Nevermind",
)
expected = {
"confirm": {"text": "I'm really sure", "type": "plain_text", "emoji": True},
"deny": {"text": "Nevermind", "type": "plain_text", "emoji": True},
"text": {"text": "are you sure?", "type": "mrkdwn"},
"title": {"text": "some title", "type": "plain_text", "emoji": True},
}
self.assertDictEqual(expected, confirm.to_dict())
self.assertDictEqual(expected, confirm.to_dict("block"))
self.assertDictEqual(
{
"text": "are you sure?",
"title": "some title",
"ok_text": "I'm really sure",
"dismiss_text": "Nevermind",
},
confirm.to_dict("action"),
)
def test_passing_text_objects(self):
direct_construction = ConfirmObject(title="title", text="Are you sure?")
mrkdwn = MarkdownTextObject(text="Are you sure?")
preconstructed = ConfirmObject(title="title", text=mrkdwn)
self.assertDictEqual(direct_construction.to_dict(), preconstructed.to_dict())
plaintext = PlainTextObject(text="Are you sure?", emoji=False)
passed_plaintext = ConfirmObject(title="title", text=plaintext)
self.assertDictEqual(
{
"confirm": {"emoji": True, "text": "Yes", "type": "plain_text"},
"deny": {"emoji": True, "text": "No", "type": "plain_text"},
"text": {"emoji": False, "text": "Are you sure?", "type": "plain_text"},
"title": {"emoji": True, "text": "title", "type": "plain_text"},
},
passed_plaintext.to_dict(),
)
def test_title_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(title=STRING_301_CHARS, text="Are you sure?").to_dict()
def test_text_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(title="title", text=STRING_301_CHARS).to_dict()
def test_text_length_with_object(self):
with self.assertRaises(SlackObjectFormationError):
plaintext = PlainTextObject(text=STRING_301_CHARS)
ConfirmObject(title="title", text=plaintext).to_dict()
with self.assertRaises(SlackObjectFormationError):
markdown = MarkdownTextObject(text=STRING_301_CHARS)
ConfirmObject(title="title", text=markdown).to_dict()
def test_confirm_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(
title="title", text="Are you sure?", confirm=STRING_51_CHARS
).to_dict()
def test_deny_length(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject(
title="title", text="Are you sure?", deny=STRING_51_CHARS
).to_dict()
class OptionTests(unittest.TestCase):
def setUp(self) -> None:
self.common = Option(label="an option", value="option_1")
def test_block_style_json(self):
expected = {
"text": {"type": "plain_text", "text": "an option", "emoji": True},
"value": "option_1",
}
self.assertDictEqual(expected, self.common.to_dict("block"))
self.assertDictEqual(expected, self.common.to_dict())
def test_dialog_style_json(self):
expected = {"label": "an option", "value": "option_1"}
self.assertDictEqual(expected, self.common.to_dict("dialog"))
def test_action_style_json(self):
expected = {"text": "an option", "value": "option_1"}
self.assertDictEqual(expected, self.common.to_dict("action"))
def test_from_single_value(self):
option = Option(label="option_1", value="option_1")
self.assertDictEqual(
option.to_dict("text"),
option.from_single_value("option_1").to_dict("text"),
)
def test_label_length(self):
with self.assertRaises(SlackObjectFormationError):
Option(label=STRING_301_CHARS, value="option_1").to_dict("text")
def test_value_length(self):
with self.assertRaises(SlackObjectFormationError):
Option(label="option_1", value=STRING_301_CHARS).to_dict("text")
class OptionGroupTests(unittest.TestCase):
maxDiff = None
def setUp(self) -> None:
self.common_options = [
Option.from_single_value("one"),
Option.from_single_value("two"),
Option.from_single_value("three"),
]
self.common = OptionGroup(label="an option", options=self.common_options)
def test_block_style_json(self):
expected = {
"label": {"emoji": True, "text": "an option", "type": "plain_text"},
"options": [
{
"text": {"emoji": True, "text": "one", "type": "plain_text"},
"value": "one",
},
{
"text": {"emoji": True, "text": "two", "type": "plain_text"},
"value": "two",
},
{
"text": {"emoji": True, "text": "three", "type": "plain_text"},
"value": "three",
},
],
}
self.assertDictEqual(expected, self.common.to_dict("block"))
self.assertDictEqual(expected, self.common.to_dict())
def test_dialog_style_json(self):
self.assertDictEqual(
{
"label": "an option",
"options": [
{"label": "one", "value": "one"},
{"label": "two", "value": "two"},
{"label": "three", "value": "three"},
],
},
self.common.to_dict("dialog"),
)
def test_action_style_json(self):
self.assertDictEqual(
{
"text": "an option",
"options": [
{"text": "one", "value": "one"},
{"text": "two", "value": "two"},
{"text": "three", "value": "three"},
],
},
self.common.to_dict("action"),
)
def test_label_length(self):
with self.assertRaises(SlackObjectFormationError):
OptionGroup(label=STRING_301_CHARS, options=self.common_options).to_dict(
"text"
)
def test_options_length(self):
with self.assertRaises(SlackObjectFormationError):
OptionGroup(label="option_group", options=self.common_options * 34).to_dict(
"text"
)
def test_confirm_style(self):
obj = ConfirmObject.parse(
{
"title": {"type": "plain_text", "text": "Are you sure?"},
"text": {
"type": "mrkdwn",
"text": "Wouldn't you prefer a good game of _chess_?",
},
"confirm": {"type": "plain_text", "text": "Do it"},
"deny": {"type": "plain_text", "text": "Stop, I've changed my mind!"},
"style": "primary",
}
)
obj.validate_json()
self.assertEqual("primary", obj.style)
def test_confirm_style_validation(self):
with self.assertRaises(SlackObjectFormationError):
ConfirmObject.parse(
{
"title": {"type": "plain_text", "text": "Are you sure?"},
"text": {
"type": "mrkdwn",
"text": "Wouldn't you prefer a good game of _chess_?",
},
"confirm": {"type": "plain_text", "text": "Do it"},
"deny": {
"type": "plain_text",
"text": "Stop, I've changed my mind!",
},
"style": "something-wrong",
}
).validate_json()
| 34.522158
| 88
| 0.569459
|
1587a3b711d1b23e2e9cb5334033a6c8a9eb7c69
| 1,082
|
py
|
Python
|
quadpy/_scipy_compat.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/_scipy_compat.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/_scipy_compat.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
import numpy
from .line_segment import integrate_adaptive
# compatibility for scipy.quad
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quad.html
def quad(f, a, b, args=(), epsabs=1.49e-08, epsrel=1.49e-08, limit=50):
assert a <= b
# See <https://www.gnu.org/software/gsl/doc/html/integration.html> for the
# variable transformations
if a == -numpy.inf and b == numpy.inf:
# x = (1 - t) / t
# dx / dt = -1 / t**2
a = 0.0
b = 1.0
def g(t):
            return (f((1 - t) / t, *args) + f(-(1 - t) / t, *args)) / t ** 2
elif b == numpy.inf:
a_orig = a
a = 0.0
b = 1.0
def g(t):
return f(a_orig + (1 - t) / t, *args) / t ** 2
elif a == -numpy.inf:
b_orig = b
a = 0.0
b = 1.0
def g(t):
return f(b_orig - (1 - t) / t, *args) / t ** 2
else:
def g(x):
return f(x, *args)
return integrate_adaptive(
g, [a, b], eps_abs=epsabs, eps_rel=epsrel, max_num_subintervals=limit
)
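# A minimal usage sketch: the hypothetical _demo helper below assumes only numpy
# and the `quad` wrapper defined above. Integrating exp(-x) over [0, inf)
# exercises the semi-infinite branch, which maps the interval onto [0, 1] via
# x = a + (1 - t) / t before handing off to integrate_adaptive.
def _demo():  # hypothetical helper for illustration only
    # The result is whatever integrate_adaptive returns; the integral value
    # should be close to 1.0 for this integrand.
    result = quad(lambda x: numpy.exp(-x), 0.0, numpy.inf)
    print(result)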
| 23.521739
| 80
| 0.501848
|
6d6ba84e24388e112e37f481f00954187052bf59
| 20,647
|
py
|
Python
|
tests/components/homematicip_cloud/test_binary_sensor.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 3
|
2017-09-16T23:34:59.000Z
|
2021-12-20T11:11:27.000Z
|
tests/components/homematicip_cloud/test_binary_sensor.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
tests/components/homematicip_cloud/test_binary_sensor.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 2
|
2021-03-17T11:01:07.000Z
|
2021-08-19T15:21:32.000Z
|
"""Tests for HomematicIP Cloud binary sensor."""
from homematicip.base.enums import SmokeDetectorAlarmType, WindowState
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.binary_sensor import (
ATTR_ACCELERATION_SENSOR_MODE,
ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION,
ATTR_ACCELERATION_SENSOR_SENSITIVITY,
ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE,
ATTR_MOISTURE_DETECTED,
ATTR_MOTION_DETECTED,
ATTR_POWER_MAINS_FAILURE,
ATTR_PRESENCE_DETECTED,
ATTR_WATER_LEVEL_DETECTED,
ATTR_WINDOW_STATE,
)
from homeassistant.components.homematicip_cloud.generic_entity import (
ATTR_EVENT_DELAY,
ATTR_GROUP_MEMBER_UNREACHABLE,
ATTR_LOW_BATTERY,
ATTR_RSSI_DEVICE,
ATTR_SABOTAGE,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(hass):
"""Test that we do not set up an access point."""
assert await async_setup_component(
hass,
BINARY_SENSOR_DOMAIN,
{BINARY_SENSOR_DOMAIN: {"platform": HMIPC_DOMAIN}},
)
assert not hass.data.get(HMIPC_DOMAIN)
async def test_hmip_access_point_cloud_connection_sensor(
hass, default_mock_hap_factory
):
"""Test HomematicipCloudConnectionSensor."""
entity_id = "binary_sensor.access_point_cloud_connection"
entity_name = "Access Point Cloud Connection"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_acceleration_sensor(hass, default_mock_hap_factory):
"""Test HomematicipAccelerationSensor."""
entity_id = "binary_sensor.garagentor"
entity_name = "Garagentor"
device_model = "HmIP-SAM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_MODE] == "FLAT_DECT"
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION] == "VERTICAL"
assert (
ha_state.attributes[ATTR_ACCELERATION_SENSOR_SENSITIVITY] == "SENSOR_RANGE_4G"
)
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE] == 45
service_call_counter = len(hmip_device.mock_calls)
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", False
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert len(hmip_device.mock_calls) == service_call_counter + 1
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", True
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert len(hmip_device.mock_calls) == service_call_counter + 2
async def test_hmip_tilt_vibration_sensor(hass, default_mock_hap_factory):
"""Test HomematicipTiltVibrationSensor."""
entity_id = "binary_sensor.garage_neigungs_und_erschutterungssensor"
entity_name = "Garage Neigungs- und Erschütterungssensor"
device_model = "HmIP-STV"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_MODE] == "FLAT_DECT"
assert (
ha_state.attributes[ATTR_ACCELERATION_SENSOR_SENSITIVITY] == "SENSOR_RANGE_2G"
)
assert ha_state.attributes[ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE] == 45
service_call_counter = len(hmip_device.mock_calls)
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", False
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert len(hmip_device.mock_calls) == service_call_counter + 1
await async_manipulate_test_data(
hass, hmip_device, "accelerationSensorTriggered", True
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert len(hmip_device.mock_calls) == service_call_counter + 2
async def test_hmip_contact_interface(hass, default_mock_hap_factory):
"""Test HomematicipContactInterface."""
entity_id = "binary_sensor.kontakt_schnittstelle_unterputz_1_fach"
entity_name = "Kontakt-Schnittstelle Unterputz – 1-fach"
device_model = "HmIP-FCI1"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_shutter_contact(hass, default_mock_hap_factory):
"""Test HomematicipShutterContact."""
entity_id = "binary_sensor.fenstergriffsensor"
entity_name = "Fenstergriffsensor"
device_model = "HmIP-SRH"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.TILTED
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
await async_manipulate_test_data(
hass, hmip_device, "windowState", WindowState.CLOSED
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
# test common attributes
assert ha_state.attributes[ATTR_RSSI_DEVICE] == -54
assert not ha_state.attributes.get(ATTR_SABOTAGE)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_SABOTAGE]
async def test_hmip_shutter_contact_optical(hass, default_mock_hap_factory):
"""Test HomematicipShutterContact."""
entity_id = "binary_sensor.sitzplatzture"
entity_name = "Sitzplatzt\u00fcre"
device_model = "HmIP-SWDO-PL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "windowState", None)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
# test common attributes
assert ha_state.attributes[ATTR_RSSI_DEVICE] == -72
assert not ha_state.attributes.get(ATTR_SABOTAGE)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_SABOTAGE]
async def test_hmip_motion_detector(hass, default_mock_hap_factory):
"""Test HomematicipMotionDetector."""
entity_id = "binary_sensor.bewegungsmelder_fur_55er_rahmen_innen"
entity_name = "Bewegungsmelder für 55er Rahmen – innen"
device_model = "HmIP-SMI55"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_presence_detector(hass, default_mock_hap_factory):
"""Test HomematicipPresenceDetector."""
entity_id = "binary_sensor.spi_1"
entity_name = "SPI_1"
device_model = "HmIP-SPI"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert not ha_state.attributes.get(ATTR_EVENT_DELAY)
await async_manipulate_test_data(hass, hmip_device, "eventDelay", True)
ha_state = hass.states.get(entity_id)
assert ha_state.attributes[ATTR_EVENT_DELAY]
async def test_hmip_pluggable_mains_failure_surveillance_sensor(
hass, default_mock_hap_factory
):
"""Test HomematicipPresenceDetector."""
entity_id = "binary_sensor.netzausfalluberwachung"
entity_name = "Netzausfallüberwachung"
device_model = "HmIP-PMFS"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "powerMainsFailure", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_smoke_detector(hass, default_mock_hap_factory):
"""Test HomematicipSmokeDetector."""
entity_id = "binary_sensor.rauchwarnmelder"
entity_name = "Rauchwarnmelder"
device_model = "HmIP-SWSD"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.PRIMARY_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
None,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_water_detector(hass, default_mock_hap_factory):
"""Test HomematicipWaterDetector."""
entity_id = "binary_sensor.wassersensor"
entity_name = "Wassersensor"
device_model = "HmIP-SWD"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", False)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", False)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_storm_sensor(hass, default_mock_hap_factory):
"""Test HomematicipStormSensor."""
entity_id = "binary_sensor.weather_sensor_plus_storm"
entity_name = "Weather Sensor – plus Storm"
device_model = "HmIP-SWO-PL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Weather Sensor – plus"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "storm", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_rain_sensor(hass, default_mock_hap_factory):
"""Test HomematicipRainSensor."""
entity_id = "binary_sensor.wettersensor_pro_raining"
entity_name = "Wettersensor - pro Raining"
device_model = "HmIP-SWO-PR"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wettersensor - pro"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "raining", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_sunshine_sensor(hass, default_mock_hap_factory):
"""Test HomematicipSunshineSensor."""
entity_id = "binary_sensor.wettersensor_pro_sunshine"
entity_name = "Wettersensor - pro Sunshine"
device_model = "HmIP-SWO-PR"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wettersensor - pro"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert ha_state.attributes["today_sunshine_duration_in_minutes"] == 100
await async_manipulate_test_data(hass, hmip_device, "sunshine", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
async def test_hmip_battery_sensor(hass, default_mock_hap_factory):
"""Test HomematicipSunshineSensor."""
entity_id = "binary_sensor.wohnungsture_battery"
entity_name = "Wohnungstüre Battery"
device_model = "HMIP-SWDO"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Wohnungstüre"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
await async_manipulate_test_data(hass, hmip_device, "lowBat", True)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_security_zone_sensor_group(hass, default_mock_hap_factory):
"""Test HomematicipSecurityZoneSensorGroup."""
entity_id = "binary_sensor.internal_securityzone"
entity_name = "INTERNAL SecurityZone"
device_model = "HmIP-SecurityZone"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_groups=["INTERNAL"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_MOTION_DETECTED)
assert not ha_state.attributes.get(ATTR_PRESENCE_DETECTED)
assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
assert not ha_state.attributes.get(ATTR_SABOTAGE)
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
await async_manipulate_test_data(hass, hmip_device, "unreach", True)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_MOTION_DETECTED]
assert ha_state.attributes[ATTR_PRESENCE_DETECTED]
assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
assert ha_state.attributes[ATTR_SABOTAGE]
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
async def test_hmip_security_sensor_group(hass, default_mock_hap_factory):
"""Test HomematicipSecuritySensorGroup."""
entity_id = "binary_sensor.buro_sensors"
entity_name = "Büro Sensors"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Büro"])
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.PRIMARY_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert (
ha_state.attributes["smoke_detector_alarm"]
== SmokeDetectorAlarmType.PRIMARY_ALARM
)
await async_manipulate_test_data(
hass, hmip_device, "smokeDetectorAlarmType", SmokeDetectorAlarmType.IDLE_OFF
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_OFF
assert not ha_state.attributes.get(ATTR_LOW_BATTERY)
assert not ha_state.attributes.get(ATTR_MOTION_DETECTED)
assert not ha_state.attributes.get(ATTR_PRESENCE_DETECTED)
assert not ha_state.attributes.get(ATTR_POWER_MAINS_FAILURE)
assert not ha_state.attributes.get(ATTR_MOISTURE_DETECTED)
assert not ha_state.attributes.get(ATTR_WATER_LEVEL_DETECTED)
assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
assert not ha_state.attributes.get(ATTR_SABOTAGE)
assert not ha_state.attributes.get(ATTR_WINDOW_STATE)
await async_manipulate_test_data(hass, hmip_device, "lowBat", True)
await async_manipulate_test_data(hass, hmip_device, "motionDetected", True)
await async_manipulate_test_data(hass, hmip_device, "presenceDetected", True)
await async_manipulate_test_data(hass, hmip_device, "powerMainsFailure", True)
await async_manipulate_test_data(hass, hmip_device, "moistureDetected", True)
await async_manipulate_test_data(hass, hmip_device, "waterlevelDetected", True)
await async_manipulate_test_data(hass, hmip_device, "unreach", True)
await async_manipulate_test_data(hass, hmip_device, "sabotage", True)
await async_manipulate_test_data(hass, hmip_device, "windowState", WindowState.OPEN)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_LOW_BATTERY]
assert ha_state.attributes[ATTR_MOTION_DETECTED]
assert ha_state.attributes[ATTR_PRESENCE_DETECTED]
assert ha_state.attributes[ATTR_POWER_MAINS_FAILURE]
assert ha_state.attributes[ATTR_MOISTURE_DETECTED]
assert ha_state.attributes[ATTR_WATER_LEVEL_DETECTED]
assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
assert ha_state.attributes[ATTR_SABOTAGE]
assert ha_state.attributes[ATTR_WINDOW_STATE] == WindowState.OPEN
await async_manipulate_test_data(
hass,
hmip_device,
"smokeDetectorAlarmType",
SmokeDetectorAlarmType.INTRUSION_ALARM,
)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
| 38.023941
| 88
| 0.756623
|
ba1782cc1b4cd7f5781a85a82006c6381210e9e4
| 1,966
|
py
|
Python
|
face_detection/test.py
|
TOBEACODER7/AI_Chatbot_of_BIT
|
6c1e13b79031c56af0116f553aa2a39a31e512e9
|
[
"MIT"
] | 1
|
2022-01-11T02:56:13.000Z
|
2022-01-11T02:56:13.000Z
|
face_detection/test.py
|
TOBEACODER7/AI_Chatbot_of_BIT
|
6c1e13b79031c56af0116f553aa2a39a31e512e9
|
[
"MIT"
] | null | null | null |
face_detection/test.py
|
TOBEACODER7/AI_Chatbot_of_BIT
|
6c1e13b79031c56af0116f553aa2a39a31e512e9
|
[
"MIT"
] | null | null | null |
import face_recognition
import cv2
import numpy as np
def face():
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
if face_locations:
return True
# Display the results
        for (top, right, bottom, left) in face_locations:
name = 'user'
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
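# A minimal usage sketch: the hypothetical _demo helper below assumes a webcam
# at index 0 and only the face() function defined above, which returns True as
# soon as any face shows up in the video stream.
def _demo():  # hypothetical helper for illustration only
    if face():
        print("face detected")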
| 33.322034
| 106
| 0.57528
|
0be514e967d92434508145875842b30694abe0fb
| 4,103
|
py
|
Python
|
nmt/serving/inference_service.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
nmt/serving/inference_service.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
nmt/serving/inference_service.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf8
from __future__ import print_function
import os
import time
import argparse
from itertools import groupby
import numpy as np
import tensorflow as tf
from .. import cvae_model
from .. import model_helper
from .. import nmt
from ..utils import nmt_utils
from ..utils import misc_utils as utils
utils.check_tensorflow_version()
class AlphaCommentServer(object):
def __init__(self, model_dir, src_vocab_file=None, tgt_vocab_file=None, args=None):
nmt_parser = argparse.ArgumentParser()
nmt.add_arguments(nmt_parser)
FLAGS, _ = nmt_parser.parse_known_args(args)
default_hparams = nmt.create_hparams(FLAGS)
self.hparams = nmt.create_or_load_hparams(model_dir, default_hparams, FLAGS.hparams_path, save_hparams=False)
        self.hparams.beam_width = 0  # force greedy decoding for inference
if src_vocab_file:
self.hparams.src_vocab_file = src_vocab_file
else:
self.hparams.src_vocab_file = os.path.join(model_dir, "vocab.in")
if tgt_vocab_file:
self.hparams.tgt_vocab_file = tgt_vocab_file
else:
self.hparams.tgt_vocab_file = os.path.join(model_dir, "vocab.out")
self.ckpt = tf.train.latest_checkpoint(model_dir)
self.infer_model = model_helper.create_infer_model(cvae_model.CVAEModel, self.hparams)
self.sess = tf.Session(graph=self.infer_model.graph, config=utils.get_config_proto())
with self.infer_model.graph.as_default():
self.loaded_infer_model = model_helper.load_model(
self.infer_model.model, self.ckpt, self.sess, "infer")
def _refineAndValidateComment(self, comment):
tokens = comment.split()
if "<unk>" in tokens:
return None
refined_tokens = [k for k, g in groupby(tokens)] # remove consecutive duplicated tokens
if len(refined_tokens) != len(set(refined_tokens)): # still has non-consecutive duplicated tokens
return None
return " ".join(refined_tokens)
def comment(self, title, sample_num=30, batch_size=30, lm_score=False):
if batch_size > sample_num:
batch_size = sample_num
infer_data = [title] * sample_num
self.sess.run(
self.infer_model.iterator.initializer,
feed_dict={
self.infer_model.src_placeholder: infer_data,
self.infer_model.batch_size_placeholder: batch_size
})
# Decode
utils.print_out("# Start decoding with title: %s" % title)
start_time = time.time()
comments = {}
while True:
try:
if lm_score:
nmt_outputs, nmt_logp = self.loaded_infer_model.decode_with_logp(self.sess)
else:
nmt_outputs, _ = self.loaded_infer_model.decode(self.sess)
nmt_logp = None
if self.hparams.beam_width > 0:
nmt_outputs = nmt_outputs[0]
if nmt_logp is not None:
nmt_logp = nmt_logp[0]
batch_size = nmt_outputs.shape[0]
for sent_id in range(batch_size):
translation, score = nmt_utils.get_translation_with_score(
nmt_outputs,
nmt_logp,
sent_id,
tgt_eos=self.hparams.eos,
subword_option=self.hparams.subword_option)
utils.print_out("sample comment: %s lm score: %s"%(translation, score))
refined_trans = self._refineAndValidateComment(translation)
if refined_trans:
utils.print_out("refined comment: %s"%refined_trans)
comments[refined_trans] = score
except tf.errors.OutOfRangeError:
utils.print_time(
" done, num of outputs %d"%len(comments), start_time)
break
return sorted(comments.items(), key=lambda x: x[1])
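# A minimal usage sketch: the hypothetical _demo helper below assumes only the
# AlphaCommentServer class above plus a trained model directory containing a
# checkpoint, hparams and vocab.in/vocab.out files ("/path/to/model_dir" is a
# placeholder, not a real path).
def _demo():  # hypothetical helper for illustration only
    server = AlphaCommentServer("/path/to/model_dir")
    # comment() returns (comment, score) pairs, lowest score first.
    for text, score in server.comment("example title", sample_num=5, batch_size=5, lm_score=True):
        print(text, score)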
| 41.03
| 117
| 0.612479
|
9adf15b14622b6a080ec1fed08f40cd7a431dbb8
| 2,653
|
py
|
Python
|
src/codexdb/catalog.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
src/codexdb/catalog.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
src/codexdb/catalog.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
'''
Created on Oct 5, 2021
@author: immanueltrummer
'''
import json
class DbCatalog():
""" Information over all databases in database directory. """
def __init__(self, data_dir):
""" Initialize for given database directory.
Args:
data_dir: contains databases and schemata
"""
self.data_dir = data_dir
self.schema_path = f'{data_dir}/schemata.json'
with open(self.schema_path) as file:
self.schemata = json.load(file)
self.table_to_file = {}
def assign_file(self, db_id, table, file_name):
""" Assign file to given table in given database.
Args:
db_id: table is in this database
table: assign file containing data for this table
file_name: name of file containing data
"""
self.table_to_file[(db_id, table)] = file_name
def db_dir(self, db_id):
""" Returns directory storing specific database.
Args:
db_id: name of database
Returns:
path of directory containing database
"""
return f'{self.data_dir}/database/{db_id}'
def file_name(self, db_id, table):
""" Returns name of file storing table data.
Args:
db_id: ID of database
table: name of table
Returns:
name of file storing data
"""
key = (db_id, table)
default = f'{table}.csv'
return self.table_to_file.get(key, default)
def file_path(self, db_id, table):
""" Returns path to file containing data for table.
Args:
db_id: search table in this database
table: name of table
Returns:
path to file containing data for table
"""
db_dir = self.db_dir(db_id)
file_name = self.file_name(db_id, table)
return f'{db_dir}/{file_name}'
def files(self, db_id):
""" Returns names of files containing database tables.
Args:
db_id: unique database identifier
Returns:
list of files associated with database tables
"""
tables = self.schema(db_id)['table_names_original']
return [self.file_name(db_id, t) for t in tables]
def schema(self, db_id):
""" Returns description of database schema.
Args:
db_id: unique name of database
Returns:
JSON object describing database schema
"""
return self.schemata[db_id]
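# A minimal usage sketch: the hypothetical _demo helper below assumes a data
# directory containing schemata.json and per-database folders as described in
# the docstrings above; the database and table names are placeholders.
def _demo():  # hypothetical helper for illustration only
    catalog = DbCatalog('/path/to/data_dir')
    # By default a table maps to <table>.csv inside its database directory ...
    print(catalog.file_path('sales_db', 'customers'))
    # ... unless an explicit file has been assigned for it.
    catalog.assign_file('sales_db', 'orders', 'orders_2021.csv')
    print(catalog.file_path('sales_db', 'orders'))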
| 28.526882
| 65
| 0.557105
|
a84f71feb2731d57f629437c925c74153afb2ce0
| 790
|
py
|
Python
|
colour/examples/colorimetry/examples_blackbody.py
|
tjdcs/colour
|
09413da71b5da57408eb812797c5db1300d4791a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/examples/colorimetry/examples_blackbody.py
|
tjdcs/colour
|
09413da71b5da57408eb812797c5db1300d4791a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/examples/colorimetry/examples_blackbody.py
|
tjdcs/colour
|
09413da71b5da57408eb812797c5db1300d4791a
|
[
"BSD-3-Clause"
] | null | null | null |
"""Showcases blackbody / planckian radiator computations."""
import colour
from colour.utilities import message_box
message_box("Blackbody / Planckian Radiator Computations")
message_box(
"Computing the spectral distribution of a blackbody at temperature 5000K"
'degrees and converting to "CIE XYZ" tristimulus values.'
)
cmfs = colour.MSDS_CMFS["CIE 1931 2 Degree Standard Observer"]
sd_blackbody = colour.sd_blackbody(5000, cmfs.shape)
print(sd_blackbody)
XYZ = colour.sd_to_XYZ(sd_blackbody, cmfs)
print(XYZ)
print("\n")
message_box(
"Computing the spectral radiance of a blackbody at wavelength 500nm and "
"temperature 5000K degrees."
)
print(colour.colorimetry.blackbody_spectral_radiance(500 * 1e-9, 5000))
print(colour.colorimetry.planck_law(500 * 1e-9, 5000))
| 30.384615
| 77
| 0.777215
|
9a290e35d256e60c76d84eba700a7e6d8ee4612a
| 3,767
|
py
|
Python
|
spikeinterface/extractors/spykingcircusextractors.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
spikeinterface/extractors/spykingcircusextractors.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
spikeinterface/extractors/spykingcircusextractors.py
|
vncntprvst/spikeinterface
|
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
|
[
"MIT"
] | null | null | null |
import numpy as np
from pathlib import Path
from spikeinterface.core import (BaseSorting, BaseSortingSegment)
from spikeinterface.core.core_tools import define_function_from_class
try:
import h5py
HAVE_H5PY = True
except ImportError:
HAVE_H5PY = False
class SpykingCircusSortingExtractor(BaseSorting):
extractor_name = 'SpykingCircusSortingExtractor'
installed = HAVE_H5PY # check at class level if installed or not
is_writable = True
mode = 'folder'
installation_mesg = "To use the SpykingCircusSortingExtractor install h5py: \n\n pip install h5py\n\n"
def __init__(self, folder_path):
assert HAVE_H5PY, self.installation_mesg
spykingcircus_folder = Path(folder_path)
listfiles = spykingcircus_folder.iterdir()
parent_folder = None
result_folder = None
for f in listfiles:
if f.is_dir():
if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
parent_folder = spykingcircus_folder
result_folder = f
if parent_folder is None:
parent_folder = spykingcircus_folder.parent
for f in parent_folder.iterdir():
if f.is_dir():
if any([f_.suffix == '.hdf5' for f_ in f.iterdir()]):
result_folder = spykingcircus_folder
assert isinstance(parent_folder, Path) and isinstance(result_folder, Path), "Not a valid spyking circus folder"
# load files
results = None
for f in result_folder.iterdir():
if 'result.hdf5' in str(f):
results = f
if 'result-merged.hdf5' in str(f):
results = f
break
if results is None:
raise Exception(spykingcircus_folder, " is not a spyking circus folder")
# load params
sample_rate = None
for f in parent_folder.iterdir():
if f.suffix == '.params':
sample_rate = _load_sample_rate(f)
assert sample_rate is not None, 'sample rate not found'
with h5py.File(results, 'r') as f_results:
spiketrains = []
unit_ids = []
for temp in f_results['spiketimes'].keys():
spiketrains.append(np.array(f_results['spiketimes'][temp]).astype('int64'))
unit_ids.append(int(temp.split('_')[-1]))
BaseSorting.__init__(self, sample_rate, unit_ids)
        self.add_sorting_segment(SpykingCircusSortingSegment(unit_ids, spiketrains))
self._kwargs = {'folder_path': str(Path(folder_path).absolute())}
self.extra_requirements.append('h5py')
class SpykingCircusSortingSegment(BaseSortingSegment):
def __init__(self, unit_ids, spiketrains):
BaseSortingSegment.__init__(self)
self._unit_ids = list(unit_ids)
self._spiketrains = spiketrains
def get_unit_spike_train(self, unit_id, start_frame, end_frame):
unit_index = self._unit_ids.index(unit_id)
times = self._spiketrains[unit_index]
if start_frame is not None:
times = times[times >= start_frame]
if end_frame is not None:
times = times[times < end_frame]
return times
def _load_sample_rate(params_file):
sample_rate = None
with params_file.open('r') as f:
for r in f.readlines():
if 'sampling_rate' in r:
sample_rate = r.split('=')[-1]
if '#' in sample_rate:
sample_rate = sample_rate[:sample_rate.find('#')]
sample_rate = float(sample_rate)
return sample_rate
read_spykingcircus = define_function_from_class(source_class=SpykingCircusSortingExtractor, name="read_spykingcircus")
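# A minimal usage sketch: the hypothetical _demo helper below assumes an existing
# Spyking Circus output folder (the path is a placeholder) containing a
# result.hdf5 or result-merged.hdf5 file plus a .params file next to it.
def _demo():  # hypothetical helper for illustration only
    sorting = read_spykingcircus('/path/to/spykingcircus_output')
    # unit ids are parsed from the 'temp_<id>' keys of the spiketimes group
    print(sorting.get_unit_ids())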
| 35.537736
| 119
| 0.631272
|
3852a2770067521a91119a6cf1e0e5bbff0d80dd
| 4,397
|
py
|
Python
|
trakt/core/configuration.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | 11
|
2015-02-01T22:22:48.000Z
|
2019-01-24T12:18:07.000Z
|
trakt/core/configuration.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | 3
|
2015-03-26T12:18:02.000Z
|
2019-02-21T08:12:04.000Z
|
trakt/core/configuration.py
|
omaralvarez/trakt.py
|
93a6beb73cdd37ffb354d2e9c1892dc39d9c4baf
|
[
"MIT"
] | 2
|
2016-07-19T22:55:16.000Z
|
2019-01-24T12:19:08.000Z
|
from trakt.core.context_collection import ContextCollection
DEFAULT_HTTP_RETRY = False
DEFAULT_HTTP_MAX_RETRIES = 3
DEFAULT_HTTP_RETRY_SLEEP = 5
DEFAULT_HTTP_TIMEOUT = (6.05, 24)
class ConfigurationManager(object):
def __init__(self):
self.defaults = Configuration(self)
self.stack = ContextCollection([self.defaults])
self.oauth = OAuthConfiguration(self)
@property
def current(self):
return self.stack[-1]
def app(self, name=None, version=None, date=None, id=None):
return Configuration(self).app(name, version, date, id)
def auth(self, login=None, token=None):
return Configuration(self).auth(login, token)
def client(self, id=None, secret=None):
return Configuration(self).client(id, secret)
def http(self, retry=DEFAULT_HTTP_RETRY, max_retries=DEFAULT_HTTP_MAX_RETRIES, retry_sleep=DEFAULT_HTTP_RETRY_SLEEP,
timeout=DEFAULT_HTTP_TIMEOUT):
return Configuration(self).http(retry, max_retries, retry_sleep, timeout)
def get(self, key, default=None):
for x in range(len(self.stack) - 1, -1, -1):
value = self.stack[x].get(key)
if value is not None:
return value
return default
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.current[key] = value
class Configuration(object):
def __init__(self, manager):
self.manager = manager
self.data = {}
self.oauth = OAuthConfiguration(self)
def app(self, name=None, version=None, date=None, id=None):
self.data['app.name'] = name
self.data['app.version'] = version
self.data['app.date'] = date
self.data['app.id'] = id
return self
def auth(self, login=None, token=None):
self.data['auth.login'] = login
self.data['auth.token'] = token
return self
def client(self, id=None, secret=None):
self.data['client.id'] = id
self.data['client.secret'] = secret
return self
def http(self, retry=DEFAULT_HTTP_RETRY, max_retries=DEFAULT_HTTP_MAX_RETRIES, retry_sleep=DEFAULT_HTTP_RETRY_SLEEP,
timeout=DEFAULT_HTTP_TIMEOUT):
self.data['http.retry'] = retry
self.data['http.max_retries'] = max_retries
self.data['http.retry_sleep'] = retry_sleep
self.data['http.timeout'] = timeout
return self
def get(self, key, default=None):
return self.data.get(key, default)
def __enter__(self):
self.manager.stack.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
item = self.manager.stack.pop()
assert item == self, 'Removed %r from stack, expecting %r' % (item, self)
# Clear old context lists
if len(self.manager.stack) == 1:
self.manager.stack.clear()
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
class OAuthConfiguration(object):
def __init__(self, owner):
self.owner = owner
def __call__(self, token=None, refresh_token=None, created_at=None, expires_in=None, refresh=None):
if type(self.owner) is ConfigurationManager:
return Configuration(self.owner).oauth(token, refresh_token, created_at, expires_in, refresh)
self.owner.data.update({
'oauth.token': token,
'oauth.refresh_token': refresh_token,
'oauth.created_at': created_at,
'oauth.expires_in': expires_in,
'oauth.refresh': refresh
})
return self.owner
def from_response(self, response=None, refresh=None):
if type(self.owner) is ConfigurationManager:
return Configuration(self.owner).oauth.from_response(response, refresh)
if not response:
raise ValueError('Invalid "response" parameter provided to oauth.from_response()')
self.owner.data.update({
'oauth.token': response.get('access_token'),
'oauth.refresh_token': response.get('refresh_token'),
'oauth.created_at': response.get('created_at'),
'oauth.expires_in': response.get('expires_in'),
'oauth.refresh': refresh
})
return self.owner
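# A minimal usage sketch: the hypothetical _demo helper below assumes only the
# classes defined above; the client id/secret values are placeholders.
def _demo():  # hypothetical helper for illustration only
    manager = ConfigurationManager()
    # Entering a Configuration pushes it onto the stack, so lookups resolve
    # against it; leaving the block pops it again.
    with manager.client(id='my-client-id', secret='my-client-secret'):
        print(manager['client.id'])   # -> 'my-client-id'
    print(manager.get('client.id'))   # -> None, only the empty defaults remain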
| 29.510067
| 120
| 0.629065
|
0bb69994ecb0862ac470df7c6de4d02935c2e2a9
| 27,262
|
py
|
Python
|
specs/glxapi.py
|
erich666/apitrace
|
a314508e397c8f1814228d36259ea8708034444e
|
[
"MIT"
] | 1
|
2015-04-28T04:55:47.000Z
|
2015-04-28T04:55:47.000Z
|
specs/glxapi.py
|
erich666/apitrace
|
a314508e397c8f1814228d36259ea8708034444e
|
[
"MIT"
] | null | null | null |
specs/glxapi.py
|
erich666/apitrace
|
a314508e397c8f1814228d36259ea8708034444e
|
[
"MIT"
] | null | null | null |
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""GLX API description."""
from stdapi import *
from glapi import *
VisualID = Alias("VisualID", UInt32)
Display = Opaque("Display *")
Visual = Opaque("Visual *")
Font = Alias("Font", UInt32)
Pixmap = Alias("Pixmap", UInt32)
Window = Alias("Window", UInt32)
Colormap = Alias("Colormap", UInt32)
GLXContext = Opaque("GLXContext")
GLXPixmap = Alias("GLXPixmap", UInt32)
GLXDrawable = Alias("GLXDrawable", UInt32)
GLXFBConfig = Opaque("GLXFBConfig")
GLXFBConfigSGIX = Opaque("GLXFBConfigSGIX")
GLXFBConfigID = Alias("GLXFBConfigID", UInt32)
GLXContextID = Alias("GLXContextID", UInt32)
GLXWindow = Alias("GLXWindow", UInt32)
GLXPbuffer = Alias("GLXPbuffer", UInt32)
GLXPbufferSGIX = Alias("GLXPbufferSGIX", UInt32)
GLXVideoSourceSGIX = Alias("GLXVideoSourceSGIX", UInt32)
GLXVideoDeviceNV = Alias("GLXVideoDeviceNV", UInt32)
GLXVideoCaptureDeviceNV = Alias("GLXVideoCaptureDeviceNV", UInt32)
XVisualInfo = Struct("XVisualInfo", [
(Visual, "visual"),
(VisualID, "visualid"),
(Int, "screen"),
(Int, "depth"),
(Int, "c_class"),
(ULong, "red_mask"),
(ULong, "green_mask"),
(ULong, "blue_mask"),
(Int, "colormap_size"),
(Int, "bits_per_rgb"),
])
Bool = FakeEnum(Int, [
"False",
"True",
])
GLXEnum = FakeEnum(Int, [
#"GLX_USE_GL", # 1
"GLX_BUFFER_SIZE", # 2
"GLX_LEVEL", # 3
"GLX_RGBA", # 4
"GLX_DOUBLEBUFFER", # 5
"GLX_STEREO", # 6
"GLX_AUX_BUFFERS", # 7
"GLX_RED_SIZE", # 8
"GLX_GREEN_SIZE", # 9
"GLX_BLUE_SIZE", # 10
"GLX_ALPHA_SIZE", # 11
"GLX_DEPTH_SIZE", # 12
"GLX_STENCIL_SIZE", # 13
"GLX_ACCUM_RED_SIZE", # 14
"GLX_ACCUM_GREEN_SIZE", # 15
"GLX_ACCUM_BLUE_SIZE", # 16
"GLX_ACCUM_ALPHA_SIZE", # 17
"GLX_CONFIG_CAVEAT", # 0x20
"GLX_X_VISUAL_TYPE", # 0x22
"GLX_TRANSPARENT_TYPE", # 0x23
"GLX_TRANSPARENT_INDEX_VALUE", # 0x24
"GLX_TRANSPARENT_RED_VALUE", # 0x25
"GLX_TRANSPARENT_GREEN_VALUE", # 0x26
"GLX_TRANSPARENT_BLUE_VALUE", # 0x27
"GLX_TRANSPARENT_ALPHA_VALUE", # 0x28
"GLX_CONTEXT_MAJOR_VERSION_ARB", # 0x2091
"GLX_CONTEXT_MINOR_VERSION_ARB", # 0x2092
"GLX_CONTEXT_FLAGS_ARB", # 0x2094
"GLX_BIND_TO_TEXTURE_RGB_EXT", # 0x20D0
"GLX_BIND_TO_TEXTURE_RGBA_EXT", # 0x20D1
"GLX_BIND_TO_MIPMAP_TEXTURE_EXT", # 0x20D2
"GLX_BIND_TO_TEXTURE_TARGETS_EXT", # 0x20D3
"GLX_Y_INVERTED_EXT", # 0x20D4
"GLX_TEXTURE_FORMAT_EXT", # 0x20D5
"GLX_TEXTURE_TARGET_EXT", # 0x20D6
"GLX_MIPMAP_TEXTURE_EXT", # 0x20D7
"GLX_TEXTURE_FORMAT_NONE_EXT", # 0x20D8
"GLX_TEXTURE_FORMAT_RGB_EXT", # 0x20D9
"GLX_TEXTURE_FORMAT_RGBA_EXT", # 0x20DA
"GLX_TEXTURE_1D_EXT", # 0x20DB
"GLX_TEXTURE_2D_EXT", # 0x20DC
"GLX_TEXTURE_RECTANGLE_EXT", # 0x20DD
"GLX_FRONT_LEFT_EXT", # 0x20DE
"GLX_FRONT_RIGHT_EXT", # 0x20DF
"GLX_BACK_LEFT_EXT", # 0x20E0
"GLX_BACK_RIGHT_EXT", # 0x20E1
"GLX_AUX0_EXT", # 0x20E2
"GLX_AUX1_EXT", # 0x20E3
"GLX_AUX2_EXT", # 0x20E4
"GLX_AUX3_EXT", # 0x20E5
"GLX_AUX4_EXT", # 0x20E6
"GLX_AUX5_EXT", # 0x20E7
"GLX_AUX6_EXT", # 0x20E8
"GLX_AUX7_EXT", # 0x20E9
"GLX_AUX8_EXT", # 0x20EA
"GLX_AUX9_EXT", # 0x20EB
"GLX_NONE", # 0x8000
"GLX_SLOW_CONFIG", # 0x8001
"GLX_TRUE_COLOR", # 0x8002
"GLX_DIRECT_COLOR", # 0x8003
"GLX_PSEUDO_COLOR", # 0x8004
"GLX_STATIC_COLOR", # 0x8005
"GLX_GRAY_SCALE", # 0x8006
"GLX_STATIC_GRAY", # 0x8007
"GLX_TRANSPARENT_RGB", # 0x8008
"GLX_TRANSPARENT_INDEX", # 0x8009
"GLX_VISUAL_ID", # 0x800B
"GLX_SCREEN", # 0x800C
"GLX_NON_CONFORMANT_CONFIG", # 0x800D
"GLX_DRAWABLE_TYPE", # 0x8010
"GLX_RENDER_TYPE", # 0x8011
"GLX_X_RENDERABLE", # 0x8012
"GLX_FBCONFIG_ID", # 0x8013
"GLX_RGBA_TYPE", # 0x8014
"GLX_COLOR_INDEX_TYPE", # 0x8015
"GLX_MAX_PBUFFER_WIDTH", # 0x8016
"GLX_MAX_PBUFFER_HEIGHT", # 0x8017
"GLX_MAX_PBUFFER_PIXELS", # 0x8018
"GLX_PRESERVED_CONTENTS", # 0x801B
"GLX_LARGEST_PBUFFER", # 0x801C
"GLX_WIDTH", # 0x801D
"GLX_HEIGHT", # 0x801E
"GLX_EVENT_MASK", # 0x801F
"GLX_DAMAGED", # 0x8020
"GLX_SAVED", # 0x8021
"GLX_WINDOW", # 0x8022
"GLX_PBUFFER", # 0x8023
"GLX_PBUFFER_HEIGHT", # 0x8040
"GLX_PBUFFER_WIDTH", # 0x8041
"GLX_LOSE_CONTEXT_ON_RESET_ARB", # 0x8252
"GLX_NO_RESET_NOTIFICATION_ARB", # 0x8261
"GLX_CONTEXT_PROFILE_MASK_ARB", # 0x9126
"GLX_SAMPLE_BUFFERS", # 100000
"GLX_SAMPLES", # 100001
"GLX_DONT_CARE", # 0xFFFFFFFF
])
GLXError = FakeEnum(Int, [
"GLX_BAD_SCREEN",
"GLX_BAD_ATTRIBUTE",
"GLX_NO_EXTENSION",
"GLX_BAD_VISUAL",
"GLX_BAD_CONTEXT",
"GLX_BAD_VALUE",
"GLX_BAD_ENUM",
])
GLXname = FakeEnum(Int, [
"GLX_VENDOR",
"GLX_VERSION",
"GLX_EXTENSIONS",
])
GLXbuffer = Flags(Int, [
"GLX_WINDOW_BIT",
"GLX_PIXMAP_BIT",
"GLX_PBUFFER_BIT",
"GLX_AUX_BUFFERS_BIT",
"GLX_FRONT_LEFT_BUFFER_BIT",
"GLX_FRONT_RIGHT_BUFFER_BIT",
"GLX_BACK_LEFT_BUFFER_BIT",
"GLX_BACK_RIGHT_BUFFER_BIT",
"GLX_DEPTH_BUFFER_BIT",
"GLX_STENCIL_BUFFER_BIT",
"GLX_ACCUM_BUFFER_BIT",
])
GLXbuffer = Flags(Int, [
"GLX_RGBA_BIT",
"GLX_COLOR_INDEX_BIT",
"GLX_PBUFFER_CLOBBER_MASK",
])
UnusedAttribs = AttribArray(Const(GLXEnum), [])
GLXCommonSizeAttribs = [
('GLX_RED_SIZE', UInt),
('GLX_GREEN_SIZE', UInt),
('GLX_BLUE_SIZE', UInt),
('GLX_ALPHA_SIZE', UInt),
('GLX_DEPTH_SIZE', UInt),
('GLX_STENCIL_SIZE', UInt),
('GLX_ACCUM_RED_SIZE', UInt),
('GLX_ACCUM_GREEN_SIZE', UInt),
('GLX_ACCUM_BLUE_SIZE', UInt),
('GLX_ACCUM_ALPHA_SIZE', UInt)
]
GLXVisualAttribs = AttribArray(GLXEnum, GLXCommonSizeAttribs + [
('GLX_USE_GL', None),
('GLX_BUFFER_SIZE', UInt),
('GLX_LEVEL', Int),
('GLX_RGBA', None),
('GLX_DOUBLEBUFFER', None),
('GLX_STEREO', None),
('GLX_AUX_BUFFERS', UInt),
('GLX_SAMPLE_BUFFERS', UInt),
('GLX_SAMPLES', UInt)],
)
GLXFBConfigCommonAttribs = GLXCommonSizeAttribs + [
('GLX_BUFFER_SIZE', UInt),
('GLX_LEVEL', Int),
('GLX_DOUBLEBUFFER', Bool),
('GLX_STEREO', Bool),
('GLX_AUX_BUFFERS', UInt),
('GLX_SAMPLE_BUFFERS', UInt),
('GLX_SAMPLES', UInt),
('GLX_RENDER_TYPE', Flags(Int, ["GLX_RGBA_BIT", "GLX_COLOR_INDEX_BIT"])),
('GLX_DRAWABLE_TYPE', Flags(Int, ["GLX_WINDOW_BIT", "GLX_PIXMAP_BIT", "GLX_PBUFFER_BIT"])),
('GLX_X_RENDERABLE', Bool),
('GLX_X_VISUAL_TYPE', FakeEnum(Int, ["GLX_TRUE_COLOR", "GLX_DIRECT_COLOR", "GLX_PSEUDO_COLOR", "GLX_STATIC_COLOR"])),
('GLX_CONFIG_CAVEAT', FakeEnum(Int, ["GLX_NONE", "GLX_SLOW_CONFIG", "GLX_NON_CONFORMANT_CONFIG"])),
('GLX_TRANSPARENT_TYPE', FakeEnum(Int, ["GLX_NONE", "GLX_TRANSPARENT_RGB", "GLX_TRANSPARENT_INDEX"])),
('GLX_TRANSPARENT_INDEX_VALUE', Int),
('GLX_TRANSPARENT_RED_VALUE', Int),
('GLX_TRANSPARENT_GREEN_VALUE', Int),
('GLX_TRANSPARENT_BLUE_VALUE', Int),
('GLX_TRANSPARENT_ALPHA_VALUE', Int)
]
GLXFBConfigGLXAttribs = GLXFBConfigCommonAttribs + [
('GLX_FBCONFIG_ID', Int), # an XID, can we do better than int?
('GLX_MAX_PBUFFER_WIDTH', Int),
('GLX_MAX_PBUFFER_HEIGHT', Int),
('GLX_MAX_PBUFFER_PIXELS', Int),
('GLX_VISUAL_ID', Int) # another XID
]
GLXFBConfigAttribs = AttribArray(Const(GLXEnum), GLXFBConfigGLXAttribs)
GLXFBConfigSGIXAttribs = AttribArray(GLXEnum, GLXFBConfigCommonAttribs)
GLXContextARBAttribs = AttribArray(Const(GLXEnum), [
('GLX_RENDER_TYPE', Flags(Int, ["GLX_RGBA_BIT", "GLX_COLOR_INDEX_BIT"])),
('GLX_CONTEXT_MAJOR_VERSION_ARB', Int),
('GLX_CONTEXT_MINOR_VERSION_ARB', Int),
('GLX_CONTEXT_FLAGS_ARB', Flags(Int, ["GLX_CONTEXT_DEBUG_BIT_ARB", "GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB", "GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB"])),
('GLX_CONTEXT_PROFILE_MASK_ARB', Flags(Int, ["GLX_CONTEXT_CORE_PROFILE_BIT_ARB", "GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB"])),
('GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB', GLXEnum),
])
GLXPbufferAttribs = AttribArray(Const(GLXEnum), [
('GLX_PBUFFER_WIDTH', Int),
('GLX_PBUFFER_HEIGHT', Int),
('GLX_LARGEST_PBUFFER', Bool),
('GLX_PRESERVED_CONTENTS', Bool)
])
GLXPbufferSGIXAttribs = AttribArray(GLXEnum, [
('GLX_PRESERVED_CONTENTS_SGIX', Bool),
('GLX_LARGEST_PBUFFER', Bool),
#('GLX_DIGITAL_MEDIA_PBUFFER_SGIX', Bool),
])
glxapi = Module("GLX")
PROC = Opaque("__GLXextFuncPtr")
glxapi.addFunctions([
# GLX
Function(Pointer(XVisualInfo), "glXChooseVisual", [(Display, "dpy"), (Int, "screen"), (GLXVisualAttribs, "attribList")]),
Function(GLXContext, "glXCreateContext", [(Display, "dpy"), (Pointer(XVisualInfo), "vis"), (GLXContext, "shareList"), (Bool, "direct")]),
Function(Void, "glXDestroyContext", [(Display, "dpy"), (GLXContext, "ctx")]),
Function(Bool, "glXMakeCurrent", [(Display, "dpy"), (GLXDrawable, "drawable"), (GLXContext, "ctx")]),
Function(Void, "glXCopyContext", [(Display, "dpy"), (GLXContext, "src"), (GLXContext, "dst"),
(ULong, "mask")]),
Function(Void, "glXSwapBuffers", [(Display, "dpy"), (GLXDrawable, "drawable")]),
Function(GLXPixmap, "glXCreateGLXPixmap", [(Display, "dpy"), (Pointer(XVisualInfo), "visual"),
(Pixmap, "pixmap")]),
Function(Void, "glXDestroyGLXPixmap", [(Display, "dpy"), (GLXPixmap, "pixmap")]),
Function(Bool, "glXQueryExtension", [(Display, "dpy"), Out(Pointer(Int), "errorb"), Out(Pointer(Int), "event")]),
Function(Bool, "glXQueryVersion", [(Display, "dpy"), Out(Pointer(Int), "maj"), Out(Pointer(Int), "min")]),
Function(Bool, "glXIsDirect", [(Display, "dpy"), (GLXContext, "ctx")]),
Function(GLXError, "glXGetConfig", [(Display, "dpy"), (Pointer(XVisualInfo), "visual"),
(GLXEnum, "attrib"), Out(Pointer(Int), "value")]),
Function(GLXContext, "glXGetCurrentContext", [], sideeffects=False),
Function(GLXDrawable, "glXGetCurrentDrawable", [], sideeffects=False),
Function(Void, "glXWaitGL", []),
Function(Void, "glXWaitX", []),
Function(Void, "glXUseXFont", [(Font, "font"), (Int, "first"), (Int, "count"), (Int, "list")]),
# GLX 1.1 and later
Function((ConstCString), "glXQueryExtensionsString", [(Display, "dpy"), (Int, "screen")], sideeffects=False),
Function((ConstCString), "glXQueryServerString", [(Display, "dpy"), (Int, "screen"), (GLXname, "name")], sideeffects=False),
Function((ConstCString), "glXGetClientString", [(Display, "dpy"), (GLXname, "name")], sideeffects=False),
# GLX 1.2 and later
Function(Display, "glXGetCurrentDisplay", [], sideeffects=False),
# GLX 1.3 and later
Function(Array(GLXFBConfig, "*nitems"), "glXChooseFBConfig", [(Display, "dpy"), (Int, "screen"), (GLXFBConfigAttribs, "attribList"), Out(Pointer(Int), "nitems")]),
Function(Int, "glXGetFBConfigAttrib", [(Display, "dpy"), (GLXFBConfig, "config"), (GLXEnum, "attribute"), Out(Pointer(Int), "value")]),
Function(Array(GLXFBConfig, "*nelements"), "glXGetFBConfigs", [(Display, "dpy"), (Int, "screen"),
Out(Pointer(Int), "nelements")]),
Function(Pointer(XVisualInfo), "glXGetVisualFromFBConfig", [(Display, "dpy"),
(GLXFBConfig, "config")]),
Function(GLXWindow, "glXCreateWindow", [(Display, "dpy"), (GLXFBConfig, "config"),
(Window, "win"), (UnusedAttribs, "attribList")]),
Function(Void, "glXDestroyWindow", [(Display, "dpy"), (GLXWindow, "window")]),
Function(GLXPixmap, "glXCreatePixmap", [(Display, "dpy"), (GLXFBConfig, "config"),
(Pixmap, "pixmap"), (UnusedAttribs, "attribList")]),
Function(Void, "glXDestroyPixmap", [(Display, "dpy"), (GLXPixmap, "pixmap")]),
Function(GLXPbuffer, "glXCreatePbuffer", [(Display, "dpy"), (GLXFBConfig, "config"),
(GLXPbufferAttribs, "attribList")]),
Function(Void, "glXDestroyPbuffer", [(Display, "dpy"), (GLXPbuffer, "pbuf")]),
Function(Void, "glXQueryDrawable", [(Display, "dpy"), (GLXDrawable, "draw"), (GLXEnum, "attribute"),
Out(Pointer(UInt), "value")]),
Function(GLXContext, "glXCreateNewContext", [(Display, "dpy"), (GLXFBConfig, "config"),
(GLXEnum, "renderType"), (GLXContext, "shareList"),
(Bool, "direct")]),
Function(Bool, "glXMakeContextCurrent", [(Display, "dpy"), (GLXDrawable, "draw"),
(GLXDrawable, "read"), (GLXContext, "ctx")]),
Function(GLXDrawable, "glXGetCurrentReadDrawable", []),
Function(Int, "glXQueryContext", [(Display, "dpy"), (GLXContext, "ctx"), (GLXEnum, "attribute"),
Out(Pointer(Int), "value")]),
Function(Void, "glXSelectEvent", [(Display, "dpy"), (GLXDrawable, "drawable"),
(ULong, "mask")]),
Function(Void, "glXGetSelectedEvent", [(Display, "dpy"), (GLXDrawable, "drawable"),
Out(Pointer(ULong), "mask")]),
# GLX_ARB_create_context
Function(GLXContext, "glXCreateContextAttribsARB", [(Display, "dpy"), (GLXFBConfig, "config"),
(GLXContext, "share_context"), (Bool, "direct"),
(GLXContextARBAttribs, "attrib_list")]),
# GLX_SGI_swap_control
Function(Int, "glXSwapIntervalSGI", [(Int, "interval")]),
# GLX_SGI_video_sync
Function(Int, "glXGetVideoSyncSGI", [(OpaquePointer(UInt), "count")]),
Function(Int, "glXWaitVideoSyncSGI", [(Int, "divisor"), (Int, "remainder"), (OpaquePointer(UInt), "count")]),
# GLX_SGI_make_current_read
Function(Bool, "glXMakeCurrentReadSGI", [(Display, "dpy"), (GLXDrawable, "draw"), (GLXDrawable, "read"), (GLXContext, "ctx")]),
Function(GLXDrawable, "glXGetCurrentReadDrawableSGI", []),
# GLX_SGIX_video_source
#Function(GLXVideoSourceSGIX, "glXCreateGLXVideoSourceSGIX", [(Display, "display"), (Int, "screen"), (VLServer, "server"), (VLPath, "path"), (Int, "nodeClass"), (VLNode, "drainNode")]),
#Function(Void, "glXDestroyGLXVideoSourceSGIX", [(Display, "dpy"), (GLXVideoSourceSGIX, "glxvideosource")]),
# GLX_EXT_import_context
Function(Display, "glXGetCurrentDisplayEXT", []),
Function(Int, "glXQueryContextInfoEXT", [(Display, "dpy"), (GLXContext, "context"), (Int, "attribute"), (OpaquePointer(Int), "value")]),
Function(GLXContextID, "glXGetContextIDEXT", [(Const(GLXContext), "context")]),
Function(GLXContext, "glXImportContextEXT", [(Display, "dpy"), (GLXContextID, "contextID")]),
Function(Void, "glXFreeContextEXT", [(Display, "dpy"), (GLXContext, "context")]),
# GLX_SGIX_fbconfig
Function(Int, "glXGetFBConfigAttribSGIX", [(Display, "dpy"), (GLXFBConfigSGIX, "config"), (Int, "attribute"), Out(Pointer(Int), "value")]),
Function(OpaquePointer(GLXFBConfigSGIX), "glXChooseFBConfigSGIX", [(Display, "dpy"), (Int, "screen"), (GLXFBConfigSGIXAttribs, "attrib_list"), Out(Pointer(Int), "nelements")]),
Function(GLXPixmap, "glXCreateGLXPixmapWithConfigSGIX", [(Display, "dpy"), (GLXFBConfigSGIX, "config"), (Pixmap, "pixmap")]),
Function(GLXContext, "glXCreateContextWithConfigSGIX", [(Display, "dpy"), (GLXFBConfigSGIX, "config"), (Int, "render_type"), (GLXContext, "share_list"), (Bool, "direct")]),
Function(Pointer(XVisualInfo), "glXGetVisualFromFBConfigSGIX", [(Display, "dpy"), (GLXFBConfigSGIX, "config")]),
Function(GLXFBConfigSGIX, "glXGetFBConfigFromVisualSGIX", [(Display, "dpy"), Out(Pointer(XVisualInfo), "vis")]),
# GLX_SGIX_pbuffer
Function(GLXPbufferSGIX, "glXCreateGLXPbufferSGIX", [(Display, "dpy"), (GLXFBConfigSGIX, "config"), (UInt, "width"), (UInt, "height"), (GLXPbufferSGIXAttribs, "attrib_list")]),
Function(Void, "glXDestroyGLXPbufferSGIX", [(Display, "dpy"), (GLXPbufferSGIX, "pbuf")]),
Function(Int, "glXQueryGLXPbufferSGIX", [(Display, "dpy"), (GLXPbufferSGIX, "pbuf"), (Int, "attribute"), Out(Pointer(UInt), "value")]),
Function(Void, "glXSelectEventSGIX", [(Display, "dpy"), (GLXDrawable, "drawable"), (ULong, "mask")]),
Function(Void, "glXGetSelectedEventSGIX", [(Display, "dpy"), (GLXDrawable, "drawable"), Out(Pointer(ULong), "mask")]),
# GLX_SGI_cushion
Function(Void, "glXCushionSGI", [(Display, "dpy"), (Window, "window"), (Float, "cushion")]),
# GLX_SGIX_video_resize
Function(Int, "glXBindChannelToWindowSGIX", [(Display, "display"), (Int, "screen"), (Int, "channel"), (Window, "window")]),
Function(Int, "glXChannelRectSGIX", [(Display, "display"), (Int, "screen"), (Int, "channel"), (Int, "x"), (Int, "y"), (Int, "w"), (Int, "h")]),
Function(Int, "glXQueryChannelRectSGIX", [(Display, "display"), (Int, "screen"), (Int, "channel"), (OpaquePointer(Int), "dx"), (OpaquePointer(Int), "dy"), (OpaquePointer(Int), "dw"), (OpaquePointer(Int), "dh")]),
Function(Int, "glXQueryChannelDeltasSGIX", [(Display, "display"), (Int, "screen"), (Int, "channel"), (OpaquePointer(Int), "x"), (OpaquePointer(Int), "y"), (OpaquePointer(Int), "w"), (OpaquePointer(Int), "h")]),
Function(Int, "glXChannelRectSyncSGIX", [(Display, "display"), (Int, "screen"), (Int, "channel"), (GLenum, "synctype")]),
# GLX_SGIX_dmbuffer
#Function(Bool, "glXAssociateDMPbufferSGIX", [(Display, "dpy"), (GLXPbufferSGIX, "pbuffer"), (OpaquePointer(DMparams), "params"), (DMbuffer, "dmbuffer")]),
# GLX_SGIX_swap_group
Function(Void, "glXJoinSwapGroupSGIX", [(Display, "dpy"), (GLXDrawable, "drawable"), (GLXDrawable, "member")]),
# GLX_SGIX_swap_barrier
Function(Void, "glXBindSwapBarrierSGIX", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int, "barrier")]),
Function(Bool, "glXQueryMaxSwapBarriersSGIX", [(Display, "dpy"), (Int, "screen"), (OpaquePointer(Int), "max")]),
# GLX_SUN_get_transparent_index
#Function(Status, "glXGetTransparentIndexSUN", [(Display, "dpy"), (Window, "overlay"), (Window, "underlay"), (OpaquePointer(Long), "pTransparentIndex")]),
# GLX_MESA_copy_sub_buffer
Function(Void, "glXCopySubBufferMESA", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int, "x"), (Int, "y"), (Int, "width"), (Int, "height")]),
# GLX_MESA_pixmap_colormap
Function(GLXPixmap, "glXCreateGLXPixmapMESA", [(Display, "dpy"), (Pointer(XVisualInfo), "visual"), (Pixmap, "pixmap"), (Colormap, "cmap")]),
# GLX_MESA_release_buffers
Function(Bool, "glXReleaseBuffersMESA", [(Display, "dpy"), (GLXDrawable, "drawable")]),
# GLX_MESA_set_3dfx_mode
Function(Bool, "glXSet3DfxModeMESA", [(Int, "mode")]),
# GLX_MESA_swap_control
Function(Int, "glXSwapIntervalMESA", [(UInt, "interval")]),
Function(Int, "glXGetSwapIntervalMESA", [], sideeffects=False),
# GLX_OML_sync_control
Function(Bool, "glXGetSyncValuesOML", [(Display, "dpy"), (GLXDrawable, "drawable"), (OpaquePointer(Int64), "ust"), (OpaquePointer(Int64), "msc"), (OpaquePointer(Int64), "sbc")]),
Function(Bool, "glXGetMscRateOML", [(Display, "dpy"), (GLXDrawable, "drawable"), (OpaquePointer(Int32), "numerator"), (OpaquePointer(Int32), "denominator")]),
Function(Int64, "glXSwapBuffersMscOML", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int64, "target_msc"), (Int64, "divisor"), (Int64, "remainder")]),
Function(Bool, "glXWaitForMscOML", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int64, "target_msc"), (Int64, "divisor"), (Int64, "remainder"), (OpaquePointer(Int64), "ust"), (OpaquePointer(Int64), "msc"), (OpaquePointer(Int64), "sbc")]),
Function(Bool, "glXWaitForSbcOML", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int64, "target_sbc"), (OpaquePointer(Int64), "ust"), (OpaquePointer(Int64), "msc"), (OpaquePointer(Int64), "sbc")]),
# GLX_SGIX_hyperpipe
#Function(OpaquePointer(GLXHyperpipeNetworkSGIX), "glXQueryHyperpipeNetworkSGIX", [(Display, "dpy"), (OpaquePointer(Int), "npipes")]),
#Function(Int, "glXHyperpipeConfigSGIX", [(Display, "dpy"), (Int, "networkId"), (Int, "npipes"), (OpaquePointer(GLXHyperpipeConfigSGIX), "cfg"), (OpaquePointer(Int), "hpId")]),
#Function(OpaquePointer(GLXHyperpipeConfigSGIX), "glXQueryHyperpipeConfigSGIX", [(Display, "dpy"), (Int, "hpId"), (OpaquePointer(Int), "npipes")]),
#Function(Int, "glXDestroyHyperpipeConfigSGIX", [(Display, "dpy"), (Int, "hpId")]),
#Function(Int, "glXBindHyperpipeSGIX", [(Display, "dpy"), (Int, "hpId")]),
#Function(Int, "glXQueryHyperpipeBestAttribSGIX", [(Display, "dpy"), (Int, "timeSlice"), (Int, "attrib"), (Int, "size"), (OpaquePointer(Void), "attribList"), (OpaquePointer(Void), "returnAttribList")]),
#Function(Int, "glXHyperpipeAttribSGIX", [(Display, "dpy"), (Int, "timeSlice"), (Int, "attrib"), (Int, "size"), (OpaquePointer(Void), "attribList")]),
#Function(Int, "glXQueryHyperpipeAttribSGIX", [(Display, "dpy"), (Int, "timeSlice"), (Int, "attrib"), (Int, "size"), (OpaquePointer(Void), "returnAttribList")]),
# GLX_MESA_agp_offset
Function(UInt, "glXGetAGPOffsetMESA", [(OpaquePointer(Const(Void)), "pointer")]),
# EXT_texture_from_pixmap
Function(Void, "glXBindTexImageEXT", [(Display, "display"), (GLXDrawable, "drawable"), (GLXEnum, "buffer"), (GLXFBConfigAttribs, "attrib_list")]),
Function(Void, "glXReleaseTexImageEXT", [(Display, "display"), (GLXDrawable, "drawable"), (GLXEnum, "buffer")]),
# GLX_NV_present_video
Function(Array(UInt, "(nelements ? *nelements : 0)"), "glXEnumerateVideoDevicesNV", [(Display, "dpy"), (Int, "screen"), Out(Pointer(Int), "nelements")]),
Function(Int, "glXBindVideoDeviceNV", [(Display, "dpy"), (UInt, "video_slot"), (UInt, "video_device"), (UnusedAttribs, "attrib_list")]),
# GLX_NV_video_output
Function(Int, "glXGetVideoDeviceNV", [(Display, "dpy"), (Int, "screen"), (Int, "numVideoDevices"), Out(Array(GLXVideoDeviceNV, "numVideoDevices"), "pVideoDevice")]),
Function(Int, "glXReleaseVideoDeviceNV", [(Display, "dpy"), (Int, "screen"), (GLXVideoDeviceNV, "VideoDevice")]),
Function(Int, "glXBindVideoImageNV", [(Display, "dpy"), (GLXVideoDeviceNV, "VideoDevice"), (GLXPbuffer, "pbuf"), (Int, "iVideoBuffer")]),
Function(Int, "glXReleaseVideoImageNV", [(Display, "dpy"), (GLXPbuffer, "pbuf")]),
Function(Int, "glXSendPbufferToVideoNV", [(Display, "dpy"), (GLXPbuffer, "pbuf"), (Int, "iBufferType"), Out(Pointer(ULong), "pulCounterPbuffer"), (GLboolean, "bBlock")]),
Function(Int, "glXGetVideoInfoNV", [(Display, "dpy"), (Int, "screen"), (GLXVideoDeviceNV, "VideoDevice"), Out(Pointer(ULong), "pulCounterOutputPbuffer"), Out(Pointer(ULong), "pulCounterOutputVideo")], sideeffects=False),
# GLX_NV_swap_group
Function(Bool, "glXJoinSwapGroupNV", [(Display, "dpy"), (GLXDrawable, "drawable"), (GLuint, "group")]),
Function(Bool, "glXBindSwapBarrierNV", [(Display, "dpy"), (GLuint, "group"), (GLuint, "barrier")]),
Function(Bool, "glXQuerySwapGroupNV", [(Display, "dpy"), (GLXDrawable, "drawable"), (OpaquePointer(GLuint), "group"), (OpaquePointer(GLuint), "barrier")]),
Function(Bool, "glXQueryMaxSwapGroupsNV", [(Display, "dpy"), (Int, "screen"), (OpaquePointer(GLuint), "maxGroups"), (OpaquePointer(GLuint), "maxBarriers")]),
Function(Bool, "glXQueryFrameCountNV", [(Display, "dpy"), (Int, "screen"), (OpaquePointer(GLuint), "count")]),
Function(Bool, "glXResetFrameCountNV", [(Display, "dpy"), (Int, "screen")]),
# GLX_NV_video_capture
Function(Int, "glXBindVideoCaptureDeviceNV", [(Display, "dpy"), (UInt, "video_capture_slot"), (GLXVideoCaptureDeviceNV, "device")]),
Function(Array(GLXVideoCaptureDeviceNV, "(nelements ? *nelements : 0)"), "glXEnumerateVideoCaptureDevicesNV", [(Display, "dpy"), (Int, "screen"), Out(Pointer(Int), "nelements")]),
Function(Void, "glXLockVideoCaptureDeviceNV", [(Display, "dpy"), (GLXVideoCaptureDeviceNV, "device")]),
Function(Int, "glXQueryVideoCaptureDeviceNV", [(Display, "dpy"), (GLXVideoCaptureDeviceNV, "device"), (Int, "attribute"), Out(Pointer(Int), "value")], sideeffects=False),
Function(Void, "glXReleaseVideoCaptureDeviceNV", [(Display, "dpy"), (GLXVideoCaptureDeviceNV, "device")]),
# GLX_EXT_swap_control
Function(Void, "glXSwapIntervalEXT", [(Display, "dpy"), (GLXDrawable, "drawable"), (Int, "interval")]),
# GLX_NV_copy_image
Function(Void, "glXCopyImageSubDataNV", [(Display, "dpy"), (GLXContext, "srcCtx"), (GLuint, "srcName"), (GLenum, "srcTarget"), (GLint, "srcLevel"), (GLint, "srcX"), (GLint, "srcY"), (GLint, "srcZ"), (GLXContext, "dstCtx"), (GLuint, "dstName"), (GLenum, "dstTarget"), (GLint, "dstLevel"), (GLint, "dstX"), (GLint, "dstY"), (GLint, "dstZ"), (GLsizei, "width"), (GLsizei, "height"), (GLsizei, "depth")]),
# GLX_NV_vertex_array_range
Function(OpaquePointer(Void), "glXAllocateMemoryNV", [(GLsizei, "size"), (GLfloat, "readfreq"), (GLfloat, "writefreq"), (GLfloat, "priority")]),
Function(Void, "glXFreeMemoryNV", [(OpaquePointer(Void), "pointer")]),
# Must be last
Function(PROC, "glXGetProcAddressARB", [(String(Const(GLubyte)), "procName")]),
Function(PROC, "glXGetProcAddress", [(String(Const(GLubyte)), "procName")]),
])
# To prevent collision with stdapi.Bool
del Bool
| 53.350294
| 405
| 0.642066
|
80d2b4bb55c85addc2b081273aab1b59feb75d85
| 1,595
|
py
|
Python
|
nodejs/setup.py
|
oberhamsi/FrameworkBenchmarks
|
660a66d51a9aad10b43c0660208fb13c098121af
|
[
"BSD-3-Clause"
] | 4
|
2015-01-22T02:13:03.000Z
|
2018-06-13T12:02:46.000Z
|
frameworks/JavaScript/nodejs/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
frameworks/JavaScript/nodejs/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import setup_util
import os
def start(args, logfile, errfile):
setup_util.replace_text("nodejs/hello.js", "mongodb:\/\/.*\/hello_world", "mongodb://" + args.database_host + "/hello_world")
setup_util.replace_text("nodejs/hello.js", "localhost", args.database_host)
try:
npm(logfile, errfile)
subprocess.Popen("node hello.js", shell=True, cwd="nodejs", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def npm(logfile, errfile):
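  # On Windows the mysql/mapper dependencies do not build, so package.json is copied
  # aside, those lines are stripped before `npm install`, and the original file is
  # restored afterwards in the finally block.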
if os.name == 'nt':
subprocess.check_call("copy package.json package.json.dist /y > NUL", shell=True, cwd="nodejs", stderr=errfile, stdout=logfile)
setup_util.replace_text("nodejs/package.json", ".*mysql.*", "")
setup_util.replace_text("nodejs/package.json", ".*mapper.*", "")
try:
subprocess.check_call("npm install", shell=True, cwd="nodejs", stderr=errfile, stdout=logfile)
finally:
if os.name == 'nt':
subprocess.check_call("del package.json", shell=True, cwd="nodejs")
subprocess.check_call("ren package.json.dist package.json", shell=True, cwd="nodejs", stderr=errfile, stdout=logfile)
def stop(logfile, errfile):
if os.name == 'nt':
subprocess.Popen("taskkill /f /im node.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
return 0
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'hello.js' in line:
pid = int(line.split(None, 2)[1])
try:
os.kill(pid, 15)
except OSError:
pass
return 0
| 34.673913
| 131
| 0.67837
|
60ec469db3b10c7ff88303b487b437e48e57ddf3
| 411
|
py
|
Python
|
hackerrank/gridland-metro/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
hackerrank/gridland-metro/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
hackerrank/gridland-metro/solution.py
|
SamProkopchuk/coding-problems
|
fa0ca2c05ac90e41945de1a5751e5545a8459ac4
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
d = defaultdict(list)
n, m, k = map(int, input().split())
res = n * m
for _ in range(k):
r, c1, c2 = map(int, input().split())
d[r].append((c1, c2))
for r in d:
d[r].sort()
s, e = d[r][0]
for (c1, c2) in d[r]:
if c1 > e:
res -= e - s + 1
s = c1
e = max(e, c2)
res -= e - s + 1
print(res)
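# Worked example (hypothetical input): a 4x4 grid with one track on row 2 covering
# columns 2..3 leaves 4*4 - 2 = 14 free cells, so for the input
#   4 4 1
#   2 2 3
# the script prints 14. Overlapping tracks on the same row are merged by the
# interval sweep above before their covered lengths are subtracted.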
| 19.571429
| 42
| 0.440389
|
55a8d83ee4d91e4f6be0c285a92752099b25807c
| 7,005
|
py
|
Python
|
tests/test_perspective_n_points.py
|
3D-Data-Processing/pytorch3d
|
20ef9195f0721bc77f10f5af77d44fc1d4ede4ff
|
[
"BSD-3-Clause"
] | 1
|
2020-07-18T19:03:11.000Z
|
2020-07-18T19:03:11.000Z
|
tests/test_perspective_n_points.py
|
3D-Data-Processing/pytorch3d
|
20ef9195f0721bc77f10f5af77d44fc1d4ede4ff
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_perspective_n_points.py
|
3D-Data-Processing/pytorch3d
|
20ef9195f0721bc77f10f5af77d44fc1d4ede4ff
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from common_testing import TestCaseMixin
from pytorch3d.ops import perspective_n_points
from pytorch3d.transforms import rotation_conversions
def reproj_error(x_world, y, R, T, weight=None):
# applies the affine transform, projects, and computes the reprojection error
y_hat = torch.matmul(x_world, R) + T[:, None, :]
y_hat = y_hat / y_hat[..., 2:]
if weight is None:
weight = y.new_ones((1, 1))
return (((weight[:, :, None] * (y - y_hat[..., :2])) ** 2).sum(dim=-1) ** 0.5).mean(
dim=-1
)
class TestPerspectiveNPoints(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(42)
@classmethod
def _generate_epnp_test_from_2d(cls, y):
"""
Instantiate random x_world, x_cam, R, T given a set of input
2D projections y.
"""
batch_size = y.shape[0]
x_cam = torch.cat((y, torch.rand_like(y[:, :, :1]) * 2.0 + 3.5), dim=2)
x_cam[:, :, :2] *= x_cam[:, :, 2:] # unproject
R = rotation_conversions.random_rotations(batch_size).to(y)
T = torch.randn_like(R[:, :1, :])
T[:, :, 2] = (T[:, :, 2] + 3.0).clamp(2.0)
x_world = torch.matmul(x_cam - T, R.transpose(1, 2))
return x_cam, x_world, R, T
def _run_and_print(self, x_world, y, R, T, print_stats, skip_q, check_output=False):
sol = perspective_n_points.efficient_pnp(
x_world, y.expand_as(x_world[:, :, :2]), skip_quadratic_eq=skip_q
)
err_2d = reproj_error(x_world, y, sol.R, sol.T)
R_est_quat = rotation_conversions.matrix_to_quaternion(sol.R)
R_quat = rotation_conversions.matrix_to_quaternion(R)
num_pts = x_world.shape[-2]
# quadratic part is more stable with fewer points
num_pts_thresh = 5 if skip_q else 4
if check_output and num_pts > num_pts_thresh:
assert_msg = (
f"test_perspective_n_points assertion failure for "
f"n_points={num_pts}, "
f"skip_quadratic={skip_q}, "
f"no noise."
)
self.assertClose(err_2d, sol.err_2d, msg=assert_msg)
self.assertTrue((err_2d < 5e-4).all(), msg=assert_msg)
def norm_fn(t):
return t.norm(dim=-1)
self.assertNormsClose(
T, sol.T[:, None, :], rtol=4e-3, norm_fn=norm_fn, msg=assert_msg
)
self.assertNormsClose(
R_quat, R_est_quat, rtol=3e-3, norm_fn=norm_fn, msg=assert_msg
)
if print_stats:
torch.set_printoptions(precision=5, sci_mode=False)
for err_2d, err_3d, R_gt, T_gt in zip(
sol.err_2d,
sol.err_3d,
torch.cat((sol.R, R), dim=-1),
torch.stack((sol.T, T[:, 0, :]), dim=-1),
):
print("2D Error: %1.4f" % err_2d.item())
print("3D Error: %1.4f" % err_3d.item())
print("R_hat | R_gt\n", R_gt)
print("T_hat | T_gt\n", T_gt)
def _testcase_from_2d(self, y, print_stats, benchmark, skip_q=False):
x_cam, x_world, R, T = TestPerspectiveNPoints._generate_epnp_test_from_2d(
y[None].repeat(16, 1, 1)
)
if print_stats:
print("Run without noise")
if benchmark: # return curried call
torch.cuda.synchronize()
def result():
self._run_and_print(x_world, y, R, T, False, skip_q)
torch.cuda.synchronize()
return result
self._run_and_print(x_world, y, R, T, print_stats, skip_q, check_output=True)
# in the noisy case, there are no guarantees, so we check it doesn't crash
if print_stats:
print("Run with noise")
x_world += torch.randn_like(x_world) * 0.1
self._run_and_print(x_world, y, R, T, print_stats, skip_q)
def case_with_gaussian_points(
self, batch_size=10, num_pts=20, print_stats=False, benchmark=True, skip_q=False
):
return self._testcase_from_2d(
torch.randn((num_pts, 2)).cuda() / 3.0,
print_stats=print_stats,
benchmark=benchmark,
skip_q=skip_q,
)
def test_perspective_n_points(self, print_stats=False):
if print_stats:
print("RUN ON A DENSE GRID")
u = torch.linspace(-1.0, 1.0, 20)
v = torch.linspace(-1.0, 1.0, 15)
for skip_q in [False, True]:
self._testcase_from_2d(
torch.cartesian_prod(u, v).cuda(), print_stats, False, skip_q
)
for num_pts in range(6, 3, -1):
for skip_q in [False, True]:
if print_stats:
print(f"RUN ON {num_pts} points; skip_quadratic: {skip_q}")
self.case_with_gaussian_points(
num_pts=num_pts,
print_stats=print_stats,
benchmark=False,
skip_q=skip_q,
)
def test_weighted_perspective_n_points(self, batch_size=16, num_pts=200):
# instantiate random x_world and y
y = torch.randn((batch_size, num_pts, 2)).cuda() / 3.0
x_cam, x_world, R, T = TestPerspectiveNPoints._generate_epnp_test_from_2d(y)
# randomly drop 50% of the rows
weights = (torch.rand_like(x_world[:, :, 0]) > 0.5).float()
# make sure we retain at least 6 points for each case
weights[:, :6] = 1.0
# fill ignored y with trash to ensure that we get different
# solution in case the weighting is wrong
y = y + (1 - weights[:, :, None]) * 100.0
def norm_fn(t):
return t.norm(dim=-1)
for skip_quadratic_eq in (True, False):
# get the solution for the 0/1 weighted case
sol = perspective_n_points.efficient_pnp(
x_world, y, skip_quadratic_eq=skip_quadratic_eq, weights=weights
)
sol_R_quat = rotation_conversions.matrix_to_quaternion(sol.R)
sol_T = sol.T
# check that running only on points with non-zero weights ends in the
# same place as running the 0/1 weighted version
for i in range(batch_size):
ok = weights[i] > 0
x_world_ok = x_world[i, ok][None]
y_ok = y[i, ok][None]
sol_ok = perspective_n_points.efficient_pnp(
x_world_ok, y_ok, skip_quadratic_eq=False
)
R_est_quat_ok = rotation_conversions.matrix_to_quaternion(sol_ok.R)
self.assertNormsClose(sol_T[i], sol_ok.T[0], rtol=3e-3, norm_fn=norm_fn)
self.assertNormsClose(
sol_R_quat[i], R_est_quat_ok[0], rtol=3e-4, norm_fn=norm_fn
)
| 37.66129
| 88
| 0.570021
|
021f7eb949236e09c177c7cdea7d98aa2700bf88
| 1,835
|
py
|
Python
|
tests/acceptance/test_project_overview.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | 1
|
2019-08-28T11:03:13.000Z
|
2019-08-28T11:03:13.000Z
|
tests/acceptance/test_project_overview.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | 1
|
2022-01-15T02:36:18.000Z
|
2022-01-15T02:36:18.000Z
|
tests/acceptance/test_project_overview.py
|
gaybro8777/sentry
|
4594f479db9a079d7f1ed41a9e07d8f36953319f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import pytz
from datetime import datetime
from django.utils import timezone
from mock import patch
from sentry.testutils import AcceptanceTestCase, SnubaTestCase
class ProjectOverviewTest(AcceptanceTestCase, SnubaTestCase):
def setUp(self):
super(ProjectOverviewTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.org = self.create_organization(
owner=self.user, name='Rowdy Tiger')
self.team = self.create_team(
organization=self.org, name='Mariachi Band')
self.project = self.create_project(
organization=self.org,
teams=[self.team],
name='Bengal',
)
self.login_as(self.user)
self.path = u'/{}/{}/dashboard/'.format(
self.org.slug, self.project.slug)
@patch('django.utils.timezone.now')
def test_with_issues(self, mock_now):
mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)
self.store_event(
data={
'message': 'Foo bar',
'level': 'error',
'timestamp': timezone.now().isoformat()[:19]
},
project_id=self.project.id,
assert_no_errors=False
)
self.browser.get(self.path)
self.browser.wait_until('.chart-wrapper')
self.browser.wait_until_not('.loading')
self.browser.snapshot('project dashboard with issues')
def test_with_no_issues(self):
self.project.update(first_event=timezone.now())
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.wait_until('.group-list-empty')
self.browser.wait_until_not('.loading')
self.browser.snapshot('project dashboard without issues')
| 33.981481
| 74
| 0.633243
|
fe29faea2b770c7f06ca60abc9c6f06413625579
| 4,615
|
py
|
Python
|
sadpanda/structs/processor.py
|
sebanski/sadpanda
|
2561877bfd43cff28b575844a22fe06ab5f1f2f7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
sadpanda/structs/processor.py
|
sebanski/sadpanda
|
2561877bfd43cff28b575844a22fe06ab5f1f2f7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
sadpanda/structs/processor.py
|
sebanski/sadpanda
|
2561877bfd43cff28b575844a22fe06ab5f1f2f7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
'''
the processor object that handles forward encryption and reverse decryption
__ __
/ \ ^ / \
\ / | \ /
| . | . |
\__A__/
\|. / | \ .|/
\+<| | |>+/
\ /|\ /
V===V
| |
___/ \___
sadpanda
by: alex balzer
'''
import pickle
import os
import logging
import threading
from sadpanda.icrypt.aes import iAES # TODO: remove import
from sadpanda.structs.datastore import DataStore
from sadpanda.server.distribution import DistributionManager
from sadpanda.server.server import AsyncServer
from sadpanda.structs.queueing import QueueingSystem
from sadpanda.structs.ledger import Ledger
logger = logging.getLogger(__name__)
class Processor(object): # Blockchain
'''
base processor - handles processing ledgers
for the time being this will be the base blockchain item
'''
def __init__(self, start_value, key, args):
    # TODO: this is a big one, as it basically invents the market if you use this library for that kind of thing.
self.args = args
self.server = AsyncServer(2828, args)
self.distribution_manager = DistributionManager("Please add a server here or delete this variable.", args, self.create_p2p_hash(args.p2p_nodes))
self.queueing_system = QueueingSystem(args)
self.datastore = self.initialize_blockchain()
if len(self.datastore) == 0:
ledger = self.create_ledger(start_value, key, args)
self.store_ledger(ledger)
    # TODO: the processor needs to be spawned into a pseudo-infinite object that accepts requests and turns them into ledgers if they fit the bill
self.save_datastore(args.datastore)
# - now start the server
#self._run_server(args)
# - now start the infinite Processor item
#self.daemonize(args)
def initialize_blockchain(self):
# TODO: start the blockchain datastore check the filesystem for the blocks
return DataStore()
def load_blockchain(self, args):
# load the blockchain from disk.
# TODO: implement the blockchain load feature
self.queueing_system.load_blockchain(args)
def create_ledger(self, current_value, key, args):
if self.validate(current_value):
current_ledger = Ledger(current_value, key, args)
return current_ledger
def validate(self, value):
''' validate a value before doing anything further with it. '''
# TODO: you need to implement this method as it is a very important security measure that maintains the integrity of the system
return True
def add_entry(self, item):
'''
add a single ledger into the blockchain.
'''
ledger = Ledger(item, self.datastore.get_previous_item().ledger.encrypted_item, self.args)
self.datastore.add_item(ledger)
def store_ledger(self, ledger):
self.datastore.add_item(ledger)
def save_datastore(self, filename):
# TODO: this needs a lot of work.
#if not os.path.isdir(filename):
# os.mkdir("data")
try:
pickle.dump(self, open(filename, 'ab'))
except:
directory_name = filename.split('/')
directory_name.remove('')
logger.error("Directory '%s' does not exist! Please create this directory before you can run this application." % directory_name[:-1])
def run_server(self, args): #, hostname='', port=80):
'''
Should the processor control the http server? or maybe just launch it in the background.
'''
# TODO: you need to spawn the httpserver with the processor object as it handles everything else. use asyncio.
self.hostname = args.hostname
self.port = args.port
self.server_threads = args.server_threads
def _run_server(self, args):
    num_threads = args.server_threads
    self.server_threads = []
    for i in range(num_threads):
      # pass the bound method as the thread target; calling self.run_server(args)
      # directly would run it in the current thread and hand None to Thread()
      t = threading.Thread(target=self.run_server, args=(args,))
      self.server_threads.append(t)
      t.start()
def _initialize(self, args):
pass
def send_key(self, key):
# TODO: send the key off into space. wherever your key ledger datastore is at.....
pass
def delete_key(self, filename):
    # os.popen2() was removed in Python 3; remove the key file directly instead
    try:
      os.remove(filename)
    except OSError:
      logger.error("Could not remove key file '%s'." % filename)
    logger.info("Doing some dd's....")
def daemonize(self, key, args):
# TODO: implement the functionality of the Processor object. how it handles requests received etc...
logger.info("Starting the main sadpanda process.....")
self.send_key(key)
    # NOTE: you should deprecate this method as it implies having a key on disk.
self.delete_key(args.keyfile)
logger.info("Initializing the main daemon......")
self._initialize(args)
def create_p2p_hash(self, p2p_nodes):
''' create a local hash that contains the config value p2p node list. '''
p2p_hash = {}
for i in range(len(p2p_nodes)):
p2p_hash[i] = p2p_nodes[i]
return p2p_hash
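    # For example (hypothetical node list):
    #   create_p2p_hash(["10.0.0.1:2828", "10.0.0.2:2828"])
    # returns {0: "10.0.0.1:2828", 1: "10.0.0.2:2828"}.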
| 32.964286
| 146
| 0.71961
|
e75f3b21aed70117be794556da48812a3de9f0b9
| 2,097
|
py
|
Python
|
lstm_rl/parameters.py
|
liusida/lstm_rl
|
641f94989e8669c97b03e41d003d0061b374ca79
|
[
"MIT"
] | null | null | null |
lstm_rl/parameters.py
|
liusida/lstm_rl
|
641f94989e8669c97b03e41d003d0061b374ca79
|
[
"MIT"
] | null | null | null |
lstm_rl/parameters.py
|
liusida/lstm_rl
|
641f94989e8669c97b03e41d003d0061b374ca79
|
[
"MIT"
] | null | null | null |
import argparse
from dataclasses import dataclass
from yamldataclassconfig.config import YamlDataClassConfig
@dataclass
class HyperParameters(YamlDataClassConfig):
"""
Treatment: how many different features_extractors do you want to use in parallel?
"""
num_lstm: int = 1
num_mlp: int = 0 # TODO: mlp is 1:1, but lstm is n:1, how to make them compatible?
"""
Basic Information
"""
env: str = ""
experiment_name: str = "DefaultExp"
env_mask_velocity: bool = False
seed: int = 0
# multiprocess?
asynchronous_environment: bool = False
# Force using CPU for gathering trajectories.
force_cpu_gather: bool = True
# Save training state frequency in PPO iterations.
checkpoint_frequency: int = 10
"""
Hyperparameters of the models
"""
scale_reward: float = 0.01
min_reward: float = -1000.
hidden_size: int = 128
batch_size: int = 512
discount: float = 0.99
gae_lambda: float = 0.95
ppo_clip: float = 0.2
ppo_epochs: int = 10
max_grad_norm: float = 1.
entropy_factor: float = 0.
actor_learning_rate: float = 1e-4
critic_learning_rate: float = 1e-4
recurrent_seq_len: int = 8
recurrent_layers: int = 1
rollout_steps: int = 2048
parallel_rollouts: int = 1
patience: int = 200
    # Apply to continuous action spaces only
trainable_std_dev: bool = False
init_log_std_dev: float = 0.0
# Stop Condition
max_iterations: int = 1000000
# Render
render: bool = True
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-yaml-path", type=str, required=True, help="Specify the yaml configure file for the experiment.")
args = parser.parse_args()
hp = HyperParameters()
hp.load(args.config_yaml_path)
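# A minimal sketch of a config file this loader accepts (hypothetical values; any
# field of HyperParameters may appear as a YAML key), e.g. cartpole.yaml:
#
#   env: "CartPole-v1"
#   experiment_name: "lstm_baseline"
#   num_lstm: 1
#   hidden_size: 128
#   rollout_steps: 2048
#
# and then: python parameters.py --config-yaml-path cartpole.yaml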
| 36.155172
| 132
| 0.58083
|
b7e9a0e24d334c9016387b5ff4bbecba32d71dbe
| 339
|
py
|
Python
|
templatetags/churchmanager_extras.py
|
bm424/churchmanager
|
474e96af0c5257de3bf4c4faf4438b0292f879ab
|
[
"MIT"
] | null | null | null |
templatetags/churchmanager_extras.py
|
bm424/churchmanager
|
474e96af0c5257de3bf4c4faf4438b0292f879ab
|
[
"MIT"
] | 9
|
2016-12-10T22:33:14.000Z
|
2017-01-29T16:26:57.000Z
|
templatetags/churchmanager_extras.py
|
bm424/churchmanager
|
474e96af0c5257de3bf4c4faf4438b0292f879ab
|
[
"MIT"
] | null | null | null |
import re
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag(takes_context=True)
def town_or_village(context, urlname):
pattern = "^" + reverse(urlname)
path = context['request'].path
if re.search(pattern, path):
return 'active'
return ''
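# Hypothetical template usage (assumes a URL pattern named 'parishes' and that the
# request is available in the template context):
#   <li class="{% town_or_village 'parishes' %}"><a href="{% url 'parishes' %}">Parishes</a></li>
# The tag returns 'active' only when the current request path starts with that URL,
# which makes it handy for highlighting the active navigation item.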
| 22.6
| 44
| 0.716814
|
f59807a971a083aa1a1297e4afa6dc3e6d566d6c
| 4,752
|
py
|
Python
|
examples/mixture/plot_gmm_covariances.py
|
talahajeer/scikit-learn
|
d66b42708a5912039740cd08f747229433e579b5
|
[
"BSD-3-Clause"
] | 27
|
2015-01-22T22:30:09.000Z
|
2022-02-15T07:33:06.000Z
|
examples/mixture/plot_gmm_covariances.py
|
talahajeer/scikit-learn
|
d66b42708a5912039740cd08f747229433e579b5
|
[
"BSD-3-Clause"
] | 26
|
2019-11-11T18:17:02.000Z
|
2020-05-14T02:57:37.000Z
|
examples/mixture/plot_gmm_covariances.py
|
talahajeer/scikit-learn
|
d66b42708a5912039740cd08f747229433e579b5
|
[
"BSD-3-Clause"
] | 25
|
2015-07-30T13:47:25.000Z
|
2021-08-03T07:48:38.000Z
|
"""
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
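# The supervised-mean initialization described above, as a stand-alone sketch
# (assumes a labelled training split X_train, y_train with classes 0..2, as for
# iris); the full script below repeats it for every covariance type and plots
# the fitted ellipses:
#
#   gm = GaussianMixture(n_components=3, covariance_type="full", max_iter=20)
#   gm.means_init = np.array([X_train[y_train == c].mean(axis=0) for c in range(3)])
#   gm.fit(X_train)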
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
ax.set_aspect('equal', 'datalim')
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = {cov_type: GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0)
for cov_type in ['spherical', 'diag', 'tied', 'full']}
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| 34.686131
| 76
| 0.671928
|
edf9286afb6daab59c9d3e4ed9abfc874ff41b0f
| 3,708
|
py
|
Python
|
mysodexo/api.py
|
AndreMiras/mysodexo
|
3d0836c38e10d579a8758b998744d1a38ff77260
|
[
"MIT"
] | 9
|
2019-11-13T08:15:51.000Z
|
2022-03-07T18:48:28.000Z
|
mysodexo/api.py
|
AndreMiras/mysodexo
|
3d0836c38e10d579a8758b998744d1a38ff77260
|
[
"MIT"
] | 3
|
2019-11-13T16:45:33.000Z
|
2021-10-21T10:47:54.000Z
|
mysodexo/api.py
|
AndreMiras/mysodexo
|
3d0836c38e10d579a8758b998744d1a38ff77260
|
[
"MIT"
] | 1
|
2020-10-10T10:22:37.000Z
|
2020-10-10T10:22:37.000Z
|
#!/usr/bin/env python3
import os
from pprint import pprint
from typing import Any, Dict, Tuple
import requests
from mysodexo.constants import (
BASE_URL,
DEFAULT_DEVICE_UID,
DEFAULT_LANG,
DEFAULT_OS,
GET_CARDS_ENDPOINT,
GET_CLEAR_PIN_ENDPOINT,
GET_DETAIL_CARD_ENDPOINT,
JSON_RESPONSE_OK_CODE,
JSON_RESPONSE_OK_MSG,
LOGIN_ENDPOINT,
LOGIN_FROM_SESSION_ENDPOINT,
REQUESTS_CERT,
REQUESTS_HEADERS,
)
def get_full_endpoint_url(endpoint: str, lang: str = DEFAULT_LANG) -> str:
endpoint = endpoint.lstrip("/")
return f"{BASE_URL}/{lang}/{endpoint}"
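    # e.g. with a hypothetical endpoint path, get_full_endpoint_url("/login", "en")
    # strips the leading slash and yields f"{BASE_URL}/en/login".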
def handle_code_msg(json_response: dict):
"""Raises an error if any in the `json_response`."""
code = json_response["code"]
msg = json_response["msg"]
assert code == JSON_RESPONSE_OK_CODE, (code, msg)
assert msg == JSON_RESPONSE_OK_MSG, (code, msg)
def session_post(
session: requests.sessions.Session, endpoint: str, data: Dict[str, Any]
) -> dict:
"""
Posts JSON `data` to `endpoint` using the `session`.
Handles errors and returns a json response dict.
"""
endpoint = get_full_endpoint_url(endpoint)
response = session.post(
endpoint, json=data, cert=REQUESTS_CERT, headers=REQUESTS_HEADERS
)
json_response = response.json()
handle_code_msg(json_response)
return json_response
def login(email: str, password: str) -> Tuple[requests.sessions.Session, dict]:
"""Logins with credentials and returns session and account info."""
endpoint = LOGIN_ENDPOINT
session = requests.session()
data = {
"username": email,
"pass": password,
"deviceUid": DEFAULT_DEVICE_UID,
"os": DEFAULT_OS,
}
json_response = session_post(session, endpoint, data)
account_info = json_response["response"]
return session, account_info
def login_from_session(session: requests.sessions.Session) -> dict:
"""Logins with session and returns account info."""
endpoint = LOGIN_FROM_SESSION_ENDPOINT
data: Dict[str, Any] = {}
json_response = session_post(session, endpoint, data)
account_info = json_response["response"]
return account_info
def get_cards(session: requests.sessions.Session, dni: str) -> list:
"""Returns cards list and details using the session provided."""
endpoint = GET_CARDS_ENDPOINT
data = {
"dni": dni,
}
json_response = session_post(session, endpoint, data)
card_list = json_response["response"]["listCard"]
return card_list
def get_detail_card(
session: requests.sessions.Session, card_number: str
) -> dict:
"""Returns card details."""
endpoint = GET_DETAIL_CARD_ENDPOINT
data = {
"cardNumber": card_number,
}
json_response = session_post(session, endpoint, data)
details = json_response["response"]["cardDetail"]
return details
def get_clear_pin(session: requests.sessions.Session, card_number: str) -> str:
"""Returns card pin."""
endpoint = GET_CLEAR_PIN_ENDPOINT
data = {
"cardNumber": card_number,
}
json_response = session_post(session, endpoint, data)
pin = json_response["response"]["clearPin"]["pin"]
return pin
def main():
email = os.environ.get("EMAIL")
password = os.environ.get("PASSWORD")
session, account_info = login(email, password)
print("account info:")
pprint(account_info)
dni = account_info["dni"]
cards = get_cards(session, dni)
print("cards:")
pprint(cards)
card = cards[0]
card_number = card["cardNumber"]
details = get_detail_card(session, card_number)
print(f"details {card_number}:")
pprint(details)
if __name__ == "__main__":
main()
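# Usage sketch: credentials come from the environment, e.g.
#   EMAIL=user@example.com PASSWORD=secret python -m mysodexo.api
# which logs in, lists the cards for the account's DNI and prints the first card's details.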
| 28.090909
| 79
| 0.685545
|