Each row of the dump below has the following columns; the type and value-range summaries are those reported by the dataset viewer (⌀ marks columns that may be null):

| column | dtype | values / lengths |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
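
The records that follow are easiest to treat as plain dictionaries keyed by these column names. As a minimal sketch (the `rows` iterable and the filter thresholds are illustrative assumptions, not part of the dataset), a row filter over the precomputed statistics columns might look like this:

```python
# Minimal sketch: filter rows by the summary-statistic columns listed above.
# `rows` (an iterable of dicts keyed by the column names) and the threshold
# values are assumptions for illustration only.
from typing import Iterable, Iterator, Tuple


def looks_like_ordinary_source(row: dict, max_line: int = 1000,
                               min_alphanum: float = 0.25) -> bool:
    """True when a row's precomputed statistics suggest hand-written code."""
    return (row["lang"] == "Python"
            and row["max_line_length"] <= max_line
            and row["alphanum_fraction"] >= min_alphanum)


def iter_contents(rows: Iterable[dict]) -> Iterator[Tuple[str, str]]:
    """Yield (max_stars_repo_path, content) pairs for rows that pass the filter."""
    for row in rows:
        if looks_like_ordinary_source(row):
            yield row["max_stars_repo_path"], row["content"]
```
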
hexsha: 724041ed2ca944f72754f92b8120fdc76e576849 | size: 758 | ext: py | lang: Python
max_stars: hook_project/urls.py | tgs/webhook-project-site | 19c409d74c50b8c293dd9152c04cc7d435b5d960 | ["MIT"] | count: null | events: null / null
max_issues: hook_project/urls.py | tgs/webhook-project-site | 19c409d74c50b8c293dd9152c04cc7d435b5d960 | ["MIT"] | count: 1 | events: 2022-03-18T18:50:20.000Z / 2022-03-18T18:50:20.000Z
max_forks: hook_project/urls.py | tgs/webhook-project-site | 19c409d74c50b8c293dd9152c04cc7d435b5d960 | ["MIT"] | count: null | events: null / null
content:
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import admin
urlpatterns = patterns("",
url(r"^$", TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r"^bkadmin/$", staff_member_required(
TemplateView.as_view(template_name="badgekit_admin.html")), name="badgekit-admin"),
url(r"^admin/", include(admin.site.urls)),
url(r"^account/", include("account.urls")),
url(r"^bk/", include("badgekit_webhooks.urls")),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
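
The file above targets a pre-1.10 Django: `django.conf.urls.patterns()` was removed in Django 1.10, and `django.conf.urls.url()` was removed in Django 4.0. A hedged sketch of the equivalent URLconf on current Django, keeping the views, template names and includes from the file above:

```python
# Sketch of the same URLconf for Django >= 2.0, where patterns() and url()
# are no longer available; views, template names and includes are unchanged.
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import include, path
from django.views.generic import TemplateView

urlpatterns = [
    path("", TemplateView.as_view(template_name="homepage.html"), name="home"),
    path("bkadmin/", staff_member_required(
        TemplateView.as_view(template_name="badgekit_admin.html")), name="badgekit-admin"),
    path("admin/", admin.site.urls),
    path("account/", include("account.urls")),
    path("bk/", include("badgekit_webhooks.urls")),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
```
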
avg_line_length: 36.095238 | max_line_length: 91 | alphanum_fraction: 0.75066

hexsha: 921cfbaa9c847b21a032647d5112c9f78f0bceb1 | size: 6,481 | ext: py | lang: Python
max_stars: couchdb/design.py | kocolosk/couchdb-python | e90e541365688063c341e58939c3ba58ceb2d69c | ["BSD-3-Clause"] | count: 1 | events: 2016-05-08T20:13:47.000Z / 2016-05-08T20:13:47.000Z
max_issues: couchdb/design.py | kocolosk/couchdb-python | e90e541365688063c341e58939c3ba58ceb2d69c | ["BSD-3-Clause"] | count: null | events: null / null
max_forks: couchdb/design.py | kocolosk/couchdb-python | e90e541365688063c341e58939c3ba58ceb2d69c | ["BSD-3-Clause"] | count: null | events: null / null
content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Utility code for managing design documents."""
from copy import deepcopy
from itertools import groupby
from operator import attrgetter
from textwrap import dedent
__all__ = ['ViewDefinition']
__docformat__ = 'restructuredtext en'
class ViewDefinition(object):
r"""Definition of a view stored in a specific design document.
An instance of this class can be used to access the results of the view,
as well as to keep the view definition in the design document up to date
with the definition in the application code.
>>> from couchdb import Server
>>> server = Server('http://localhost:5984/')
>>> db = server.create('python-tests')
>>> view = ViewDefinition('tests', 'all', '''function(doc) {
... emit(doc._id, null);
... }''')
>>> view.get_doc(db)
The view is not yet stored in the database, in fact, design doc doesn't
even exist yet. That can be fixed using the `sync` method:
>>> view.sync(db)
>>> design_doc = view.get_doc(db)
>>> design_doc #doctest: +ELLIPSIS
<Document '_design/tests'@'...' {...}>
>>> print design_doc['views']['all']['map']
function(doc) {
emit(doc._id, null);
}
Use the static `sync_many()` method to create or update a collection of
views in the database in an atomic and efficient manner, even across
different design documents.
>>> del server['python-tests']
"""
def __init__(self, design, name, map_fun, reduce_fun=None,
language='javascript', wrapper=None, **defaults):
"""Initialize the view definition.
Note that the code in `map_fun` and `reduce_fun` is automatically
dedented, that is, any common leading whitespace is removed from each
line.
:param design: the name of the design document
:param name: the name of the view
:param map_fun: the map function code
:param reduce_fun: the reduce function code (optional)
:param language: the name of the language used
:param wrapper: an optional callable that should be used to wrap the
result rows
"""
if design.startswith('_design/'):
design = design[8:]
self.design = design
self.name = name
self.map_fun = dedent(map_fun.lstrip('\n\r'))
if reduce_fun:
reduce_fun = dedent(reduce_fun.lstrip('\n\r'))
self.reduce_fun = reduce_fun
self.language = language
self.wrapper = wrapper
self.defaults = defaults
def __call__(self, db, **options):
"""Execute the view in the given database.
:param db: the `Database` instance
:param options: optional query string parameters
:return: the view results
:rtype: `ViewResults`
"""
merged_options = self.defaults.copy()
merged_options.update(options)
return db.view('/'.join([self.design, self.name]),
wrapper=self.wrapper, **merged_options)
def __repr__(self):
return '<%s %r>' % (type(self).__name__,
'/'.join(['_view', self.design, self.name]))
def get_doc(self, db):
"""Retrieve and return the design document corresponding to this view
definition from the given database.
:param db: the `Database` instance
:return: a `client.Document` instance, or `None` if the design document
does not exist in the database
:rtype: `Document`
"""
return db.get('_design/%s' % self.design)
def sync(self, db):
"""Ensure that the view stored in the database matches the view defined
by this instance.
:param db: the `Database` instance
"""
type(self).sync_many(db, [self])
@staticmethod
def sync_many(db, views, remove_missing=False, callback=None):
"""Ensure that the views stored in the database that correspond to a
given list of `ViewDefinition` instances match the code defined in
those instances.
This function might update more than one design document. This is done
using the CouchDB bulk update feature to ensure atomicity of the
operation.
:param db: the `Database` instance
:param views: a sequence of `ViewDefinition` instances
:param remove_missing: whether views found in a design document that
are not found in the list of `ViewDefinition`
instances should be removed
:param callback: a callback function that is invoked when a design
document gets updated; the callback gets passed the
design document as only parameter, before that doc
has actually been saved back to the database
"""
docs = []
for design, views in groupby(views, key=attrgetter('design')):
doc_id = '_design/%s' % design
doc = db.get(doc_id, {'_id': doc_id})
orig_doc = deepcopy(doc)
languages = set()
missing = list(doc.get('views', {}).keys())
for view in views:
funcs = {'map': view.map_fun}
if view.reduce_fun:
funcs['reduce'] = view.reduce_fun
doc.setdefault('views', {})[view.name] = funcs
languages.add(view.language)
if view.name in missing:
missing.remove(view.name)
if remove_missing and missing:
for name in missing:
del doc['views'][name]
elif missing and 'language' in doc:
languages.add(doc['language'])
if len(languages) > 1:
raise ValueError('Found different language views in one '
'design document (%r)', list(languages))
doc['language'] = list(languages)[0]
if doc != orig_doc:
if callback is not None:
callback(doc)
docs.append(doc)
db.update(docs)
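
The class docstring above demonstrates `sync()` but only describes `sync_many()`. A minimal sketch of the bulk form, assuming a CouchDB server at `http://localhost:5984/`; the database name, view names and map/reduce bodies are illustrative:

```python
# Minimal sketch of ViewDefinition.sync_many(); assumes a reachable CouchDB
# server and the couchdb-python package that ships the module above.
from couchdb import Server
from couchdb.design import ViewDefinition

by_type = ViewDefinition('tests', 'by_type', '''function(doc) {
    emit(doc.type, null);
}''')
count_by_type = ViewDefinition('tests', 'count_by_type', '''function(doc) {
    emit(doc.type, 1);
}''', reduce_fun='_count')  # _count is a CouchDB built-in reduce

server = Server('http://localhost:5984/')
db = server.create('python-tests')

# One bulk update writes both views into the _design/tests document.
ViewDefinition.sync_many(db, [by_type, count_by_type])

for row in by_type(db):  # queries _design/tests/_view/by_type
    print(row.key)
```
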
avg_line_length: 37.034286 | max_line_length: 79 | alphanum_fraction: 0.585095

hexsha: ab9ee1e1daf659253d904ed06d6f9f3d6d665f6a | size: 1,128 | ext: py | lang: Python
max_stars: pympris/MediaPlayer.py | TingPing/pympris | 26048251ca82dfb1437b7488a6e54f1aae19c212 | ["MIT"] | count: 1 | events: 2019-10-07T11:47:18.000Z / 2019-10-07T11:47:18.000Z
max_issues: pympris/MediaPlayer.py | TingPing/pympris | 26048251ca82dfb1437b7488a6e54f1aae19c212 | ["MIT"] | count: null | events: null / null
max_forks: pympris/MediaPlayer.py | TingPing/pympris | 26048251ca82dfb1437b7488a6e54f1aae19c212 | ["MIT"] | count: null | events: null / null
content:
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) Mikhail Mamrouski.
# See LICENSE for details.
"""
This module provides a `MediaPlayer` class
which contains instances of all implementations of MPRIS2 interfaces.
Usage::
mp = MediaPlayer('org.mpris.MediaPlayer2.rhythmbox')
print mp.root.Identity
if mp.root.CanRaise:
mp.root.Raise()
if mp.player.CanPause and mp.player.CanPlay:
mp.player.PlayPause()
if mp.player.CanGoNext:
mp.player.Next()
print mp.track_list.Tracks
print mp.playlists.GetPlaylists
if mp.root.CanQuit:
mp.root.Quit()
"""
from .Root import Root
from .Player import Player
from .PlayLists import PlayLists
from .TrackList import TrackList
class MediaPlayer(object):
"""Class uses as helper class."""
def __init__(self, dbus_name, bus=None, private=False):
super(MediaPlayer, self).__init__()
self.root = Root(dbus_name, bus, private)
self.player = Player(dbus_name, bus, private)
self.playlists = PlayLists(dbus_name, bus, private)
self.track_list = TrackList(dbus_name, bus, private)
avg_line_length: 24.521739 | max_line_length: 68 | alphanum_fraction: 0.689716

hexsha: 2c4f98eddb8c7d6cbbb90b9df620b497ad183cd1 | size: 1,272 | ext: py | lang: Python
max_stars: test/test_ezsigntemplatedocument_get_words_positions_v1_response_m_payload.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | ["MIT"] | count: null | events: null / null
max_issues: test/test_ezsigntemplatedocument_get_words_positions_v1_response_m_payload.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | ["MIT"] | count: null | events: null / null
max_forks: test/test_ezsigntemplatedocument_get_words_positions_v1_response_m_payload.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | ["MIT"] | count: null | events: null / null
content:
"""
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.custom_word_position_word_response import CustomWordPositionWordResponse
globals()['CustomWordPositionWordResponse'] = CustomWordPositionWordResponse
from eZmaxApi.model.ezsigntemplatedocument_get_words_positions_v1_response_m_payload import EzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload
class TestEzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload(unittest.TestCase):
"""EzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload(self):
"""Test EzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsigntemplatedocumentGetWordsPositionsV1ResponseMPayload() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 32.615385 | max_line_length: 149 | alphanum_fraction: 0.790094

hexsha: 3999c2a0fef9fcb8224b405a1fd8bbbe4928eaa5 | size: 633 | ext: py | lang: Python
max_stars: backend/education/urls.py | draihal/main-pr | 81814c5370b592963e91ad0683caa560b0ea9579 | ["MIT"] | count: 2 | events: 2021-01-28T08:23:15.000Z / 2021-03-09T06:06:58.000Z
max_issues: backend/education/urls.py | draihal/main-pr | 81814c5370b592963e91ad0683caa560b0ea9579 | ["MIT"] | count: 9 | events: 2020-01-02T15:31:04.000Z / 2021-12-09T01:59:26.000Z
max_forks: backend/education/urls.py | draihal/main-pr | 81814c5370b592963e91ad0683caa560b0ea9579 | ["MIT"] | count: 1 | events: 2021-03-09T06:11:16.000Z / 2021-03-09T06:11:16.000Z
content:
from django.urls import path, include
from rest_framework import routers
from education import views
app_name = 'education'
router = routers.DefaultRouter()
router.register('education/grades', views.GradeViewSet, 'grades')
router.register('education/homework', views.HomeworkViewSet, 'homework')
router.register('education/groups', views.GroupViewSet, 'groups')
router.register('education/lessons', views.LessonViewSet, 'lessons')
router.register('education/modules', views.ModuleViewSet, 'modules')
router.register('education/payments', views.PaymentViewSet, 'payments')
urlpatterns = [
path('', include(router.urls)),
]
avg_line_length: 28.772727 | max_line_length: 72 | alphanum_fraction: 0.777251

hexsha: 827ba94e70ab75a727a5601ebc7a78c244db8e3e | size: 2,725 | ext: py | lang: Python
max_stars: aircopy/autogen.py | st3107/aircopy | 426c321c247f74b0e556f7ed78808d440734d284 | ["BSD-3-Clause"] | count: null | events: null / null
max_issues: aircopy/autogen.py | st3107/aircopy | 426c321c247f74b0e556f7ed78808d440734d284 | ["BSD-3-Clause"] | count: 4 | events: 2020-05-21T17:01:33.000Z / 2020-05-29T19:46:18.000Z
max_forks: aircopy/autogen.py | st3107/aircopy | 426c321c247f74b0e556f7ed78808d440734d284 | ["BSD-3-Clause"] | count: null | events: null / null
content:
import copy
from datetime import datetime, timedelta
from typing import List, Union
MILESTONES_TEMPLATE = [
{
'audience': ['pi', 'lead', 'group members', 'collaborators'],
'due_date': timedelta(days=7),
'name': 'Kick off meeting',
'objective': 'roll out of project to team',
'status': 'proposed'
},
{
'audience': ['pi', 'lead', 'group members'],
'due_date': timedelta(days=14),
'name': 'project lead presentation',
'objective': 'lead presents background reading and initial project plan',
'status': 'proposed'
},
{
'audience': ['pi', 'lead', 'group members'],
'due_date': timedelta(days=28),
'name': 'planning meeting',
'objective': 'develop a detailed plan with dates',
'status': 'proposed'
}
]
DELIVERABLE_TEMPLATE = {
"audience": ['pi', 'lead', 'group members', 'collaborators'],
"due_date": timedelta(days=365),
"success_def": "audience is happy",
"scope": [
"UCs that are supported or some other scope description "
"if it is software",
"sketch of science story if it is paper"
],
"platform": "description of how and where the audience will access "
"the deliverable. Journal if it is a paper",
"roll_out": [
"steps that the audience will take to access and interact with "
"the deliverable",
"not needed for paper submissions"
],
"status": "proposed"
}
def _assgin_due_date(template: Union[List[dict], dict], start_date: str) -> None:
"""Assign the due date to tempalte according to the start_date."""
if isinstance(template, dict):
start_date = datetime.strptime(start_date, '%Y-%m-%d')
time_gap = template['due_date']
due_date = start_date + time_gap
template['due_date'] = due_date.strftime('%Y-%m-%d')
elif isinstance(template, list):
for item in template:
_assgin_due_date(item, start_date)
else:
raise TypeError("Unkown template type: {}".format(type(template)))
return
def auto_gen_milestons(start_date: str, template: List[dict] = None) -> List[dict]:
"""Automatically generate the milestones list according to the template."""
if template is None:
template = copy.deepcopy(MILESTONES_TEMPLATE)
_assgin_due_date(template, start_date)
return template
def auto_gen_deliverable(start_date: str, template: dict = None) -> dict:
"""Automatically generate the deliverable dictionary according to the template."""
if template is None:
template = copy.deepcopy(DELIVERABLE_TEMPLATE)
_assgin_due_date(template, start_date)
return template
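
A short usage sketch for the two helpers above; the start date is an illustrative value, and the printed due dates follow directly from the timedelta offsets in the templates (7, 14, 28 and 365 days after the start date):

```python
# Usage sketch; the start date is illustrative.
from aircopy.autogen import auto_gen_deliverable, auto_gen_milestons

milestones = auto_gen_milestons('2020-05-01')
for m in milestones:
    print(m['name'], m['due_date'])
# Kick off meeting 2020-05-08
# project lead presentation 2020-05-15
# planning meeting 2020-05-29

deliverable = auto_gen_deliverable('2020-05-01')
print(deliverable['due_date'])  # 2021-05-01, 365 days after the start date
```
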
avg_line_length: 34.935897 | max_line_length: 86 | alphanum_fraction: 0.635596

hexsha: 5875142c34d64f8414929bd43ccf37971bc97df8 | size: 430 | ext: py | lang: Python
max_stars: configs/_base_/models/res2net50-w14-s8.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | ["Apache-2.0"] | count: 1,190 | events: 2020-07-10T01:16:01.000Z / 2022-03-31T09:48:38.000Z
max_issues: configs/_base_/models/res2net50-w14-s8.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | ["Apache-2.0"] | count: 702 | events: 2020-07-13T13:31:33.000Z / 2022-03-31T06:48:04.000Z
max_forks: configs/_base_/models/res2net50-w14-s8.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | ["Apache-2.0"] | count: 502 | events: 2020-07-10T02:40:55.000Z / 2022-03-31T02:07:09.000Z
content:
model = dict(
type='ImageClassifier',
backbone=dict(
type='Res2Net',
depth=50,
scales=8,
base_width=14,
deep_stem=False,
avg_down=False,
),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
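
Files under `configs/_base_/models/` in mmclassification are partial configs meant to be combined through the `_base_` inheritance mechanism. A hedged sketch of a full config that reuses the model definition above; the dataset, schedule and runtime file names are assumptions for illustration:

```python
# Hedged sketch of a top-level mmclassification config inheriting the model
# definition above; the other _base_ file names are illustrative assumptions.
_base_ = [
    '../_base_/models/res2net50-w14-s8.py',
    '../_base_/datasets/imagenet_bs32.py',
    '../_base_/schedules/imagenet_bs256.py',
    '../_base_/default_runtime.py',
]
```
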
avg_line_length: 22.631579 | max_line_length: 60 | alphanum_fraction: 0.553488

hexsha: f517f3e57ce0e7e455c5a3b9299e4b0c8b4f47fc | size: 1,528 | ext: py | lang: Python
max_stars: gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/force_index.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | ["Apache-2.0"] | count: null | events: null / null
max_issues: gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/force_index.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | ["Apache-2.0"] | count: null | events: null / null
max_forks: gQuant/plugins/gquant_plugin/notebooks/cuIndicator/viz/force_index.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | ["Apache-2.0"] | count: null | events: null / null
content:
import ipywidgets as widgets
from bqplot.colorschemes import CATEGORY20
from bqplot import Axis, Figure, LinearScale, Lines
import os
from greenflow.dataframe_flow.config_nodes_modules import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
from rapids_modules.cuindicator import force_index as indicator_fun # noqa #F401
def get_para_widgets():
para_selector = widgets.IntSlider(min=2, max=60, description="Force Index")
para_selector_widgets = [para_selector]
return para_selector_widgets
def get_parameters(stock_df, para_selector_widgets):
return (stock_df["close"], stock_df["volume"]) + tuple(
[w.value for w in para_selector_widgets])
def process_outputs(output, stock_df):
output.index = stock_df.index
stock_df['out'] = output
stock_df['out'] = output.fillna(0)
return stock_df
def create_figure(stock, dt_scale, sc, color_id,
f, indicator_figure_height,
figure_width, add_new_indicator):
sc_co = LinearScale()
ax_y = Axis(label='Force Index', scale=sc_co, orientation='vertical')
new_line = Lines(x=stock.datetime.to_array(), y=stock['out'].to_array(),
scales={'x': dt_scale, 'y': sc_co},
colors=[CATEGORY20[color_id[0]]])
new_fig = Figure(marks=[new_line], axes=[ax_y])
new_fig.layout.height = indicator_figure_height
new_fig.layout.width = figure_width
figs = [new_line]
# add new figure
add_new_indicator(new_fig)
return figs
avg_line_length: 34.727273 | max_line_length: 81 | alphanum_fraction: 0.706806

hexsha: bad0a1fdbfcf9aa0b3a454d5e7df65c5f375e75a | size: 809 | ext: py | lang: Python
max_stars: sa/migrations/0053_managedobjectprofile_ipam_sync.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | ["BSD-3-Clause"] | count: 84 | events: 2017-10-22T11:01:39.000Z / 2022-02-27T03:43:48.000Z
max_issues: sa/migrations/0053_managedobjectprofile_ipam_sync.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | ["BSD-3-Clause"] | count: 22 | events: 2017-12-11T07:21:56.000Z / 2021-09-23T02:53:50.000Z
max_forks: sa/migrations/0053_managedobjectprofile_ipam_sync.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | ["BSD-3-Clause"] | count: 23 | events: 2017-12-06T06:59:52.000Z / 2022-02-24T00:02:25.000Z
content:
# ----------------------------------------------------------------------
# managedobjectprofile ipam sync
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
self.db.add_column(
"sa_managedobjectprofile", "sync_ipam", models.BooleanField("Sync. IPAM", default=False)
)
self.db.add_column(
"sa_managedobjectprofile",
"fqdn_template",
models.TextField("FQDN template", null=True, blank=True),
)
avg_line_length: 32.36 | max_line_length: 100 | alphanum_fraction: 0.489493

hexsha: 0eb516a57e92bac183130af085a84f9089d97b0b | size: 4,208 | ext: py | lang: Python
max_stars: src/cosalib/aliyun.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | ["Apache-2.0"] | count: null | events: null / null
max_issues: src/cosalib/aliyun.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | ["Apache-2.0"] | count: null | events: null / null
max_forks: src/cosalib/aliyun.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | ["Apache-2.0"] | count: null | events: null / null
content:
import subprocess
import logging as log
import json
import sys
from cosalib.cmdlib import run_verbose
from tenacity import (
retry,
stop_after_attempt
)
def remove_aliyun_image(aliyun_id, region):
print(f"aliyun: removing image {aliyun_id} in {region}")
try:
run_verbose([
'ore',
'aliyun', '--log-level', 'debug', 'delete-image',
'--id', aliyun_id,
'--force'])
except SystemExit:
raise Exception("Failed to remove image")
@retry(reraise=True, stop=stop_after_attempt(3))
def aliyun_run_ore_replicate(build, args):
build.refresh_meta()
aliyun_img_data = build.meta.get('aliyun', [])
if len(aliyun_img_data) < 1:
raise SystemExit(("buildmeta doesn't contain source images. "
"Run buildextend-aliyun first"))
if not args.region:
args.region = subprocess.check_output([
'ore', f'--config-file={args.config}' if args.config else '',
'aliyun', 'list-regions'
]).decode().strip().split()
log.info(("default: replicating to all regions. If this is not "
" desirable, use '--regions'"))
log.info("replicating to regions: ", args.region)
# only replicate to regions that don't already exist
existing_regions = [item['name'] for item in aliyun_img_data]
duplicates = list(set(args.region).intersection(existing_regions))
if len(duplicates) > 0:
print((f"Images already exist in {duplicates} region(s)"
", skipping listed region(s)..."))
region_list = list(set(args.region) - set(duplicates))
if len(region_list) == 0:
print("no new regions detected")
sys.exit(0)
source_image = aliyun_img_data[0]['id']
source_region = aliyun_img_data[0]['name']
ore_args = [
'ore',
'--log-level', args.log_level,
'aliyun', 'copy-image',
'--image', source_image,
'--region', source_region
]
if args.config:
ore_args.extend(['--config-file', args.config])
upload_failed_in_region = None
for upload_region in region_list:
region_ore_args = ore_args.copy() + [upload_region]
print("+ {}".format(subprocess.list2cmdline(region_ore_args)))
try:
ore_data = json.loads(subprocess.check_output(region_ore_args))
except subprocess.CalledProcessError:
upload_failed_in_region = upload_region
break
aliyun_img_data.extend([
{
'name': region,
'id': val
} for region, val in ore_data.items()
])
build.meta['aliyun'] = aliyun_img_data
build.meta_write()
if upload_failed_in_region is not None:
raise Exception(f"Upload failed in {upload_failed_in_region} region")
@retry(reraise=True, stop=stop_after_attempt(3))
def aliyun_run_ore(build, args):
build.refresh_meta()
ore_args = ['ore']
if args.log_level:
ore_args.extend(['--log-level', args.log_level])
if args.force:
ore_args.extend(['--force'])
region = "us-west-1"
if args.region is not None:
region = args.region[0]
upload_name = f"{build.build_name}-{build.build_id}"
if args.name_suffix:
upload_name = f"{build.build_name}-{args.name_suffix}-{build.build_id}"
ore_args.extend([
f'--config-file={args.config}' if args.config else '',
'aliyun', 'create-image',
'--region', region,
'--bucket', args.bucket,
'--name', upload_name,
'--file', f"{build.image_path}",
'--description', f'{build.summary} {build.build_id}',
'--architecture', build.basearch,
'--disk-size-inspect'
])
print(ore_args)
# convert the binary output to string and remove trailing white space
ore_data = subprocess.check_output(ore_args).decode('utf-8').strip()
build.meta['aliyun'] = [{
'name': region,
'id': ore_data
}]
build.meta_write()
def aliyun_cli(parser):
parser.add_argument("--bucket", help="OSS Bucket")
parser.add_argument("--name-suffix", help="Suffix for uploaded image name")
return parser
avg_line_length: 30.941176 | max_line_length: 79 | alphanum_fraction: 0.615494

hexsha: 16a84a3e2b43028f050d392aa3b252a383fd0912 | size: 2,902 | ext: py | lang: Python
max_stars: tests/model_fields/test_durationfield.py | allanice001/django | aac75fa30af22965f993c6dd8da8cd9882ba2f21 | ["BSD-3-Clause"] | count: null | events: null / null
max_issues: tests/model_fields/test_durationfield.py | allanice001/django | aac75fa30af22965f993c6dd8da8cd9882ba2f21 | ["BSD-3-Clause"] | count: 1 | events: 2021-03-24T12:21:05.000Z / 2021-03-24T12:31:52.000Z
max_forks: tests/model_fields/test_durationfield.py | allanice001/django | aac75fa30af22965f993c6dd8da8cd9882ba2f21 | ["BSD-3-Clause"] | count: 2 | events: 2021-03-24T12:11:48.000Z / 2021-06-10T19:56:03.000Z
content:
import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
def test_simple_roundtrip(self):
duration = datetime.timedelta(days=123, seconds=123, microseconds=123)
DurationModel.objects.create(field=duration)
loaded = DurationModel.objects.get()
self.assertEqual(loaded.field, duration)
def test_create_empty(self):
NullDurationModel.objects.create()
loaded = NullDurationModel.objects.get()
self.assertEqual(loaded.field, None)
def test_fractional_seconds(self):
value = datetime.timedelta(seconds=2.05)
d = DurationModel.objects.create(field=value)
d.refresh_from_db()
self.assertEqual(d.field, value)
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
DurationModel.objects.create(field=datetime.timedelta(days=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=-1)),
]
def test_exact(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field=datetime.timedelta(days=1)),
[self.objs[0]]
)
def test_gt(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
[self.objs[0], self.objs[1]]
)
class TestSerialization(TestCase):
test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'
def test_dumping(self):
instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
class TestValidation(TestCase):
def test_invalid_string(self):
field = models.DurationField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('not a datetime', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
"'not a datetime' value has an invalid format. "
"It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
)
class TestFormField(TestCase):
# Tests for forms.DurationField are in the forms_tests app.
def test_formfield(self):
field = models.DurationField()
self.assertIsInstance(field.formfield(), forms.DurationField)
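
The test module above imports `DurationModel` and `NullDurationModel` from a sibling `models` module that is not part of this row. A minimal sketch of what those models must provide for the tests to pass, inferred from the test code (a `DurationField` named `field`, nullable in the second model):

```python
# Minimal sketch of the models imported by the tests above; inferred from the
# test code, not taken from the dataset row itself.
from django.db import models


class DurationModel(models.Model):
    field = models.DurationField()


class NullDurationModel(models.Model):
    field = models.DurationField(null=True)
```
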
avg_line_length: 32.977273 | max_line_length: 106 | alphanum_fraction: 0.673673

hexsha: 3805ea3c5e331bf86160596f16c6f5e1e1334b48 | size: 707 | ext: py | lang: Python
max_stars: docker-healthcheck.py | jan-di/smb1-proxy | 97bb4ec606e82b15642a710553e8a957acfde3a0 | ["MIT"] | count: 2 | events: 2021-12-06T12:03:26.000Z / 2022-02-08T08:45:06.000Z
max_issues: docker-healthcheck.py | Andreetje/smb1-proxy | 4ba33452e20fe5d45b554a95b52ea494bac42a73 | ["MIT"] | count: 1 | events: 2020-05-12T14:03:48.000Z / 2020-05-12T14:03:48.000Z
max_forks: docker-healthcheck.py | Andreetje/smb1-proxy | 4ba33452e20fe5d45b554a95b52ea494bac42a73 | ["MIT"] | count: 4 | events: 2020-05-12T13:11:49.000Z / 2022-03-17T13:18:20.000Z
content:
#!/usr/bin/env python3
import os, subprocess
fail = False
i = 0
while True:
i = i + 1
shareEnable = os.getenv('PROXY{}_ENABLE'.format(i))
if shareEnable == None:
break
elif not shareEnable == "1":
continue
remoteMount = '/remote{}'.format(i)
checkFile = remoteMount + "/healthcheck.txt"
try:
file = open(checkFile, "w")
file.write("healthcheck")
file.close()
os.remove(checkFile)
except OSError:
fail = True
print(remoteMount + " is not writeable")
ret = subprocess.call("smbclient -L \\localhost -U % -m SMB3", shell=True)
if ret != 0:
fail = True
print("Samba Server is not reachable")
if fail:
exit(1)
print("Container is healthy")
exit(0)
avg_line_length: 18.605263 | max_line_length: 74 | alphanum_fraction: 0.64215

hexsha: 746d76cf5be2386a7de5999af74aea7dff08457d | size: 1,377 | ext: py | lang: Python
max_stars: tests/test_config.py | zhengxiaowai/quart | ae1664b1d603956fbe6fd49eeae5aee8877f5d8a | ["MIT"] | count: null | events: null / null
max_issues: tests/test_config.py | zhengxiaowai/quart | ae1664b1d603956fbe6fd49eeae5aee8877f5d8a | ["MIT"] | count: null | events: null / null
max_forks: tests/test_config.py | zhengxiaowai/quart | ae1664b1d603956fbe6fd49eeae5aee8877f5d8a | ["MIT"] | count: null | events: null / null
content:
import os
from quart.config import Config, ConfigAttribute
TEST_KEY = 'test_value'
class ConfigInstance:
value = ConfigAttribute('VALUE')
config: dict = {}
def test_config_attribute() -> None:
instance = ConfigInstance()
instance.value = 'test'
assert instance.config['VALUE'] == 'test'
def test_config_from_object() -> None:
config = Config(os.path.dirname(__file__))
config.from_object(__name__)
assert config['TEST_KEY'] == 'test_value'
def _check_standard_config(config: Config) -> None:
assert config['FOO'] == 'bar'
assert config['BOB'] == 'jeff'
def test_config_from_pyfile() -> None:
config = Config(os.path.dirname(__file__))
config.from_pyfile('assets/config.cfg')
_check_standard_config(config)
def test_config_from_envvar() -> None:
config = Config(os.path.dirname(__file__))
os.environ['CONFIG'] = 'assets/config.cfg'
config.from_envvar('CONFIG')
_check_standard_config(config)
def test_config_from_json() -> None:
config = Config(os.path.dirname(__file__))
config.from_json('assets/config.json')
_check_standard_config(config)
def test_config_get_namespace() -> None:
config = Config(os.path.dirname(__file__))
config['FOO_A'] = 'a'
config['FOO_BAR'] = 'bar'
config['BAR'] = 'bar'
assert config.get_namespace('FOO_') == {'a': 'a', 'bar': 'bar'}
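
The fixture files referenced above (`assets/config.cfg`, `assets/config.json`) are not part of this row. A hedged sketch of what the `.cfg` file would need to contain for `_check_standard_config` to pass; `from_pyfile()` executes it as Python, so plain assignments suffice (the `.json` variant would carry the same two keys as a JSON object):

```python
# Hedged sketch of tests/assets/config.cfg; inferred from the assertions in
# _check_standard_config, not taken from the dataset row itself.
FOO = 'bar'
BOB = 'jeff'
```
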
avg_line_length: 25.036364 | max_line_length: 67 | alphanum_fraction: 0.684822

hexsha: fe4dcf2749673ecf621d992fc403fac15c8a65e9 | size: 49,466 | ext: py | lang: Python
max_stars: src/abaqus/Section/SectionModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | ["MIT"] | count: 7 | events: 2022-01-21T09:15:45.000Z / 2022-02-15T09:31:58.000Z
max_issues: src/abaqus/Section/SectionModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | ["MIT"] | count: null | events: null / null
max_forks: src/abaqus/Section/SectionModel.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | ["MIT"] | count: null | events: null / null
content:
import typing
from abaqusConstants import *
from .AcousticInfiniteSection import AcousticInfiniteSection
from .AcousticInterfaceSection import AcousticInterfaceSection
from .BeamSection import BeamSection
from .CohesiveSection import CohesiveSection
from .CompositeShellSection import CompositeShellSection
from .CompositeSolidSection import CompositeSolidSection
from .ConnectorSection import ConnectorSection
from .EulerianSection import EulerianSection
from .GasketSection import GasketSection
from .GeneralStiffnessSection import GeneralStiffnessSection
from .HomogeneousShellSection import HomogeneousShellSection
from .HomogeneousSolidSection import HomogeneousSolidSection
from .MPCSection import MPCSection
from .MembraneSection import MembraneSection
from .PEGSection import PEGSection
from .SectionLayerArray import SectionLayerArray
from .SurfaceSection import SurfaceSection
from .TrussSection import TrussSection
from ..Connector.ConnectorBehaviorOptionArray import ConnectorBehaviorOptionArray
from ..Model.ModelBase import ModelBase
class SectionModel(ModelBase):
"""Abaqus creates a Model object named `Model-1` when a session is started.
Notes
-----
This object can be accessed by:
.. code-block:: python
mdb.models[name]
"""
def AcousticInfiniteSection(self, name: str, material: str, thickness: float = 1,
order: int = 10) -> AcousticInfiniteSection:
"""This method creates an AcousticInfiniteSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AcousticInfiniteSection
session.odbs[name].AcousticInfiniteSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material.
thickness
A Float specifying the thickness of the section. Possible values are *thickness* >> 0.0.
The default value is 1.0.
order
An Int specifying the number of ninth-order polynomials that will be used to resolve the
variation of the acoustic field in the infinite direction. Possible values are 0 <<
*order* ≤≤ 10. The default value is 10.
Returns
-------
An AcousticInfiniteSection object.
Raises
------
InvalidNameError
RangeError
"""
self.sections[name] = section = AcousticInfiniteSection(name, material, thickness, order)
return section
def AcousticInterfaceSection(self, name: str, thickness: float = 1) -> AcousticInterfaceSection:
"""This method creates an AcousticInterfaceSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AcousticInterfaceSection
session.odbs[name].AcousticInterfaceSection
Parameters
----------
name
A String specifying the repository key.
thickness
A Float specifying the thickness of the section. Possible values are *thickness* >> 0.0.
The default value is 1.0.
Returns
-------
An AcousticInterfaceSection object.
Raises
------
InvalidNameError
RangeError
"""
self.sections[name] = section = AcousticInterfaceSection(name, thickness)
return section
def BeamSection(self, name: str, integration: SymbolicConstant, profile: str, poissonRatio: float = 0,
thermalExpansion: Boolean = OFF, temperatureDependency: Boolean = OFF,
dependencies: int = 0, density: float = None, referenceTemperature: float = None,
temperatureVar: SymbolicConstant = LINEAR, alphaDamping: float = 0,
betaDamping: float = 0, compositeDamping: float = 0, useFluidInertia: Boolean = OFF,
submerged: SymbolicConstant = FULLY, fluidMassDensity: float = None,
crossSectionRadius: float = None, lateralMassCoef: float = 1, axialMassCoef: float = 0,
massOffsetX: float = 0, massOffsetY: float = 0, beamShape: SymbolicConstant = CONSTANT,
material: str = '', table: tuple = (), outputPts: tuple = (),
centroid: tuple[float] = (), shearCenter: tuple[float] = (), profileEnd: str = '') -> BeamSection:
"""This method creates a BeamSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].BeamSection
session.odbs[name].BeamSection
Parameters
----------
name
A String specifying the repository key.
integration
A SymbolicConstant specifying the integration method for the section. Possible values
are BEFORE_ANALYSIS and DURING_ANALYSIS.
profile
A String specifying the name of the profile. This argument represents the start profile
in case of *beamShape*=TAPERED.
poissonRatio
A Float specifying the Poisson's ratio of the section. The default value is 0.0.
thermalExpansion
A Boolean specifying whether to use thermal expansion data. The default value is OFF.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
density
None or a Float specifying the density of the section. The default value is None.
referenceTemperature
None or a Float specifying the reference temperature of the section. The default value
is None.
temperatureVar
A SymbolicConstant specifying the temperature variation for the section. Possible values
are LINEAR and INTERPOLATED. The default value is LINEAR.
alphaDamping
A Float specifying the αRαR factor to create mass proportional damping in
direct-integration dynamics. The default value is 0.0.
betaDamping
A Float specifying the βRβR factor to create stiffness proportional damping in
direct-integration dynamics. The default value is 0.0.
compositeDamping
A Float specifying the fraction of critical damping to be used in calculating composite
damping factors for the modes (for use in modal dynamics). The default value is 0.0.
useFluidInertia
A Boolean specifying whether added mass effects will be simulated. The default value is
OFF.
submerged
A SymbolicConstant specifying whether the section is either full submerged or half
submerged. This argument applies only when *useFluidInertia* = True. Possible values are
FULLY and HALF. The default value is FULLY.
fluidMassDensity
None or a Float specifying the mass density of the fluid. This argument applies only
when *useFluidInertia* = True and must be specified in that case. The default value is
None.
crossSectionRadius
None or a Float specifying the radius of the cylindrical cross-section. This argument
applies only when *useFluidInertia* = True and must be specified in that case. The
default value is None.
lateralMassCoef
A Float specifying the added mass coefficient, CACA, for lateral motions of the beam.
This argument applies only when*useFluidInertia* = True. The default value is 1.0.
axialMassCoef
A Float specifying the added mass coefficient, C(A−E)C(A-E), for motions along the axis
of the beam. This argument affects only the term added to the free end(s) of the beam,
and applies only when *useFluidInertia* = True. The default value is 0.0.
massOffsetX
A Float specifying the local 1-coordinate of the center of the cylindrical cross-section
with respect to the beam cross-section. This argument applies only when
*useFluidInertia* = True. The default value is 0.0.
massOffsetY
A Float specifying the local 2-coordinate of the center of the cylindrical cross-section
with respect to the beam cross-section. This argument applies only when
*useFluidInertia* = True. The default value is 0.0.
beamShape
A SymbolicConstant specifying the change in cross-section of the beam along length.
Possible values are CONSTANT and TAPERED. The default value is CONSTANT. This parameter
is available for manipulating the model database but not for the ODB API.
material
A String specifying the name of the material. The default value is an empty string. The
material is required when *integration* is "DURING_ANALYSIS".
table
A sequence of sequences of Floats specifying the items described below. The default
value is an empty sequence.
outputPts
A sequence of pairs of Floats specifying the positions at which output is requested. The
default value is an empty sequence.
centroid
A pair of Floats specifying the *X–Y* coordinates of the centroid. The default value is
(0.0, 0.0).
shearCenter
A pair of Floats specifying the *X–Y* coordinates of the shear center. The default value
is (0.0, 0.0).
profileEnd
A String specifying the name of the end profile. The type of the end profile must be
same as that of the start profile. This argument is valid only when *beamShape*=TAPERED.
The default value is an empty string. This parameter is available for manipulating the
model database but not for the ODB API.
Returns
-------
A BeamSection object.
"""
self.sections[name] = section = BeamSection(name, integration, profile, poissonRatio, thermalExpansion,
temperatureDependency, dependencies, density, referenceTemperature,
temperatureVar, alphaDamping, betaDamping, compositeDamping,
useFluidInertia, submerged, fluidMassDensity, crossSectionRadius,
lateralMassCoef, axialMassCoef, massOffsetX, massOffsetY, beamShape,
material, table, outputPts, centroid, shearCenter, profileEnd)
return section
def CohesiveSection(self, name: str, response: SymbolicConstant, material: str,
initialThicknessType: SymbolicConstant = SOLVER_DEFAULT, initialThickness: float = 1,
outOfPlaneThickness: float = None) -> CohesiveSection:
"""This method creates a CohesiveSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].CohesiveSection
session.odbs[name].CohesiveSection
Parameters
----------
name
A String specifying the repository key.
response
A SymbolicConstant specifying the geometric assumption that defines the constitutive
behavior of the cohesive elements. Possible values are TRACTION_SEPARATION, CONTINUUM,
and GASKET.
material
A String specifying the name of the material.
initialThicknessType
A SymbolicConstant specifying the method used to compute the initial thickness. Possible
values are:SOLVER_DEFAULT, specifying that Abaqus will use the analysis product
defaultGEOMETRY, specifying that Abaqus will compute the thickness from the nodal
coordinates of the elements.SPECIFY, specifying that Abaqus will use the value given for
*initialThickness*The default value is SOLVER_DEFAULT.
initialThickness
A Float specifying the initial thickness for the section. The *initialThickness*
argument applies only when *initialThicknessType*=SPECIFY. The default value is 1.0.
outOfPlaneThickness
None or a Float specifying the out-of-plane thickness for the section. The default value
is None.
Returns
-------
A CohesiveSection object.
Raises
------
RangeError and InvalidNameError.
"""
self.sections[name] = section = CohesiveSection(name, response, material, initialThicknessType,
initialThickness, outOfPlaneThickness)
return section
def CompositeShellSection(self, name: str, layup: SectionLayerArray, symmetric: Boolean = OFF,
thicknessType: SymbolicConstant = UNIFORM, preIntegrate: Boolean = OFF,
poissonDefinition: SymbolicConstant = DEFAULT, poisson: float = 0,
integrationRule: SymbolicConstant = SIMPSON, temperature: SymbolicConstant = GRADIENT,
idealization: SymbolicConstant = NO_IDEALIZATION, nTemp: int = None,
thicknessModulus: float = None, useDensity: Boolean = OFF, density: float = 0,
layupName: str = '', thicknessField: str = '',
nodalThicknessField: str = '') -> CompositeShellSection:
"""This method creates a CompositeShellSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[name].compositeLayups[i].CompositeShellSection
mdb.models[name].CompositeShellSection
session.odbs[name].CompositeShellSection
Parameters
----------
name
A String specifying the repository key.
layup
A SectionLayerArray object specifying the shell cross-section.
symmetric
A Boolean specifying whether or not the layup should be made symmetric by the analysis.
The default value is OFF.
thicknessType
A SymbolicConstant specifying the distribution used for defining the thickness of the
elements. Possible values are UNIFORM, ANALYTICAL_FIELD, DISCRETE_FIELD,
NODAL_ANALYTICAL_FIELD, and NODAL_DISCRETE_FIELD. The default value is UNIFORM.
preIntegrate
A Boolean specifying whether the shell section properties are specified by the user
prior to the analysis (ON) or integrated during the analysis (OFF). The default value is
OFF.
poissonDefinition
A SymbolicConstant specifying whether to use the default value for the Poisson's ratio.
Possible values are:DEFAULT, specifying that the default value for the Poisson's ratio
is 0.5 in an Abaqus/Standard analysis and is obtained from the material definition in an
Abaqus/Explicit analysis.VALUE, specifying that the Poisson's ratio used in the analysis
is the value provided in *poisson*.The default value is DEFAULT.
poisson
A Float specifying the Poisson's ratio. Possible values are −1.0 ≤≤ *poisson* ≤≤ 0.5.
This argument is valid only when *poissonDefinition*=VALUE. The default value is 0.5.
integrationRule
A SymbolicConstant specifying the shell section integration rule. Possible values are
SIMPSON and GAUSS. The default value is SIMPSON.
temperature
A SymbolicConstant specifying the mode used for temperature and field variable input
across the section thickness. Possible values are GRADIENT and POINTWISE. The default
value is GRADIENT.
idealization
A SymbolicConstant specifying the mechanical idealization used for the section
calculations. This member is only applicable when *preIntegrate* is set to ON. Possible
values are NO_IDEALIZATION, SMEAR_ALL_LAYERS, MEMBRANE, and BENDING. The default value
is NO_IDEALIZATION.
nTemp
None or an Int specifying the number of temperature points to be input. This argument is
valid only when *temperature*=POINTWISE. The default value is None.
thicknessModulus
None or a Float specifying the effective thickness modulus. This argument is relevant
only for continuum shells and must be used in conjunction with the argument *poisson*.
The default value is None.
useDensity
A Boolean specifying whether or not to use the value of *density*. The default value is
OFF.
density
A Float specifying the value of density to apply to this section. The default value is
0.0.
layupName
A String specifying the layup name for this section. The default value is an empty
string.
thicknessField
A String specifying the name of the AnalyticalField or DiscreteField object used to
define the thickness of the shell elements. The *thicknessField* argument applies only
when *thicknessType*=ANALYTICAL_FIELD or *thicknessType*=DISCRETE_FIELD. The default
value is an empty string.
nodalThicknessField
A String specifying the name of the AnalyticalField or DiscreteField object used to
define the thickness of the shell elements at each node. The *nodalThicknessField*
argument applies only when *thicknessType*=NODAL_ANALYTICAL_FIELD or
*thicknessType*=NODAL_DISCRETE_FIELD. The default value is an empty string.
Returns
-------
A CompositeShellSection object.
"""
self.sections[name] = section = CompositeShellSection(name, layup, symmetric, thicknessType, preIntegrate,
poissonDefinition, poisson, integrationRule, temperature,
idealization, nTemp, thicknessModulus, useDensity,
density, layupName, thicknessField, nodalThicknessField)
return section
def CompositeSolidSection(self, name: str, layup: SectionLayerArray, symmetric: Boolean = OFF,
layupName: str = '') -> CompositeSolidSection:
"""This method creates a CompositeSolidSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].CompositeSolidSection
session.odbs[name].CompositeSolidSection
Parameters
----------
name
A String specifying the repository key.
layup
A SectionLayerArray object specifying the solid cross-section.
symmetric
A Boolean specifying whether or not the layup should be made symmetric by the analysis.
The default value is OFF.
layupName
A String specifying the layup name for this section. The default value is an empty
string.
Returns
-------
A CompositeSolidSection object.
"""
self.sections[name] = section = CompositeSolidSection(name, layup, symmetric, layupName)
return section
def ConnectorSection(self, name: str, assembledType: SymbolicConstant = NONE,
rotationalType: SymbolicConstant = NONE, translationalType: SymbolicConstant = NONE,
integration: SymbolicConstant = UNSPECIFIED, u1ReferenceLength: float = None,
u2ReferenceLength: float = None, u3ReferenceLength: float = None,
ur1ReferenceAngle: float = None, ur2ReferenceAngle: float = None,
ur3ReferenceAngle: float = None, massPerLength: float = None,
contactAngle: float = None, materialFlowFactor: float = 1, regularize: Boolean = ON,
defaultTolerance: Boolean = ON, regularization: float = 0,
extrapolation: SymbolicConstant = CONSTANT,
behaviorOptions: ConnectorBehaviorOptionArray = None) -> ConnectorSection:
"""This method creates a ConnectorSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].ConnectorSection
session.odbs[name].ConnectorSection
Parameters
----------
name
A String specifying the repository key.
assembledType
A SymbolicConstant specifying the assembled connection type. Possible values
are:NONEBEAMBUSHINGCVJOINTCYLINDRICALHINGEPLANARRETRACTORSLIPRINGTRANSLATORUJOINTWELDThe
default value is NONE.You cannot include the *assembledType* argument if
*translationalType* or *rotationalType* are given a value other than NONE. At least one
of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given
a value other than NONE.
rotationalType
A SymbolicConstant specifying the basic rotational connection type. Possible values
are:NONEALIGNCARDANCONSTANT_VELOCITYEULERFLEXION_TORSIONFLOW_CONVERTERPROJECTION_FLEXION_TORSIONREVOLUTEROTATIONROTATION_ACCELEROMETERUNIVERSALThe
default value is NONE.You cannot include the *rotationalType* argument if
*assembledType* is given a value other than NONE. At least one of the arguments
*assembledType*, *translationalType*, or *rotationalType* must be given an value other
than NONE.
translationalType
A SymbolicConstant specifying the basic translational connection type. Possible values
are:NONEACCELEROMETERAXIALCARTESIANJOINLINKPROJECTION_CARTESIANRADIAL_THRUSTSLIDE_PLANESLOTThe
default value is NONE.You cannot include the *translationalType* argument if
*assembledType* is given a value other than NONE. At least one of the arguments
*assembledType*, *translationalType*, or *rotationalType* must be given an value other
than NONE.
integration
A SymbolicConstant specifying the time integration scheme to use for analysis. This
argument is applicable only to an Abaqus/Explicit analysis. Possible values are
UNSPECIFIED, IMPLICIT, and EXPLICIT. The default value is UNSPECIFIED.
u1ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the first component of relative motion. The default value is None.
u2ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the second component of relative motion. The default value is None.
u3ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the third component of relative motion. The default value is None.
ur1ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fourth component of relative motion. The default value is None.
ur2ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fifth component of relative motion. The default value is None.
ur3ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the sixth component of relative motion. The default value is None.
massPerLength
None or a Float specifying the mass per unit reference length of belt material. This
argument is applicable only when *assembledType*=SLIPRING, and must be specified in that
case. The default value is None.
contactAngle
None or a Float specifying the contact angle made by the belt wrapping around node b.
This argument is applicable only to an Abaqus/Explicit analysis, and only when
*assembledType*=SLIPRING. The default value is None.
materialFlowFactor
A Float specifying the scaling factor for material flow at node b. This argument is
applicable only when *assembledType*=RETRACTOR or *rotationalType*=FLOW_CONVERTER. The
default value is 1.0.
regularize
A Boolean specifying whether or not all tabular data associated with the
*behaviorOptions* will be regularized. This argument is applicable only for an
Abaqus/Explicit analysis. The default value is ON.
defaultTolerance
A Boolean specifying whether or not the default regularization tolerance will be used
for all tabular data associated with the *behaviorOptions*. This argument is applicable
only for an Abaqus/Explicit analysis and only if *regularize*=ON. The default value is
ON.
regularization
A Float specifying the regularization increment to be used for all tabular data
associated with the *behaviorOptions*. This argument is applicable only for an
Abaqus/Explicit analysis and only if *regularize*=ON and *defaultTolerance*=OFF. The
default value is 0.03.
extrapolation
A SymbolicConstant specifying the extrapolation technique to be used for all tabular
data associated with the *behaviorOptions*. Possible values are CONSTANT and LINEAR. The
default value is CONSTANT.
behaviorOptions
A ConnectorBehaviorOptionArray object.
Returns
-------
A ConnectorSection object.
Raises
------
InvalidNameError
RangeError
"""
self.sections[name] = section = ConnectorSection(name, assembledType, rotationalType, translationalType,
integration, u1ReferenceLength, u2ReferenceLength,
u3ReferenceLength, ur1ReferenceAngle, ur2ReferenceAngle,
ur3ReferenceAngle, massPerLength, contactAngle,
materialFlowFactor, regularize, defaultTolerance,
regularization, extrapolation, behaviorOptions)
return section
def EulerianSection(self, name: str, data: str) -> EulerianSection:
"""This method creates a EulerianSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].EulerianSection
session.odbs[name].EulerianSection
Parameters
----------
name
A String specifying the repository key.
data
A String-to-String Dictionary specifying a dictionary mapping Material instance names to
Material names. Internally the specified mapping gets sorted on Material instance name.
Returns
-------
An EulerianSection object.
"""
self.sections[name] = section = EulerianSection(name, data)
return section
def GasketSection(self, name: str, material: str, crossSection: float = 1, initialGap: float = 0,
initialThickness: typing.Union[SymbolicConstant, float] = DEFAULT,
initialVoid: float = 0,
stabilizationStiffness: typing.Union[SymbolicConstant, float] = DEFAULT) -> GasketSection:
"""This method creates a GasketSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].GasketSection
session.odbs[name].GasketSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material of which the gasket is made or material
that defines gasket behavior.
crossSection
A Float specifying the cross-sectional area, width, or out-of-plane thickness, if
applicable, depending on the gasket element type. The default value is 1.0.
initialGap
A Float specifying the initial gap. The default value is 0.0.
initialThickness
The SymbolicConstant DEFAULT or a Float specifying the initial gasket thickness. If
DEFAULT is specified, the initial thickness is determined using nodal coordinates. The
default value is DEFAULT.
initialVoid
A Float specifying the initial void. The default value is 0.0.
stabilizationStiffness
The SymbolicConstant DEFAULT or a Float specifying the default stabilization stiffness
used in all but link elements to stabilize gasket elements that are not supported at all
nodes, such as those that extend outside neighboring components. If DEFAULT is
specified, a value is used equal to 10–9 times the initial compressive stiffness in the
thickness direction. The default value is DEFAULT.
Returns
-------
A GasketSection object. and ValueError.
"""
self.sections[name] = section = GasketSection(name, material, crossSection, initialGap, initialThickness,
initialVoid, stabilizationStiffness)
return section
def GeneralStiffnessSection(self, name: str, stiffnessMatrix: tuple, referenceTemperature: float = None,
applyThermalStress: Boolean = OFF, temperatureDependency: Boolean = OFF,
dependencies: int = 0, poissonDefinition: SymbolicConstant = DEFAULT,
poisson: float = 0, useDensity: Boolean = OFF, density: float = 0,
thermalStresses: tuple = (), scalingData: tuple = ()) -> GeneralStiffnessSection:
"""This method creates a GeneralStiffnessSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].GeneralStiffnessSection
session.odbs[name].GeneralStiffnessSection
Parameters
----------
name
A String specifying the repository key.
stiffnessMatrix
A sequence of Floats specifying the stiffness matrix for the section in the order D11,
D12, D22, D13, D23, D33, ...., D66. Twenty-one entries must be given.
referenceTemperature
None or a Float specifying the reference temperature for thermal expansion. The default
value is None.
applyThermalStress
A Boolean specifying whether or not the section stiffness varies with thermal stresses.
The default value is OFF.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
poissonDefinition
A SymbolicConstant specifying whether to use the default value for the Poisson's ratio.
Possible values are:DEFAULT, specifying that the default value for the Poisson's ratio
is 0.5 in an Abaqus/Standard analysis and is obtained from the material definition in an
Abaqus/Explicit analysis.VALUE, specifying that the Poisson's ratio used in the analysis
is the value provided in *poisson*.The default value is DEFAULT.
poisson
A Float specifying the Poisson's ratio. Possible values are −1.0 ≤≤ *poisson* ≤≤ 0.5.
This argument is valid only when *poissonDefinition*=VALUE. The default value is 0.5.
useDensity
A Boolean specifying whether or not to use the value of *density*. The default value is
OFF.
density
A Float specifying the value of density to apply to this section. The default value is
0.0.
thermalStresses
A sequence of Floats specifying the generalized stress values caused by a unit
temperature rise. Six entries must be given if the value of *applyThermalStress* is set
to True. The default value is ("").
scalingData
A sequence of sequences of Floats specifying the scaling factors for given temperatures
and/or field data. Each row should contain (Y, alpha, T, F1,...,Fn). The default value
is an empty sequence.
Returns
-------
A GeneralStiffnessSection object.
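        Examples
        --------
        A minimal usage sketch, illustrative only; the model name and the 21 placeholder
        stiffness values are assumptions, not recommended inputs:
        .. code-block:: python
            # D11 followed by 20 zero entries (D12, D22, D13, ..., D66)
            stiffness = (1.0e6,) + (0.0,) * 20
            section = mdb.models['Model-1'].GeneralStiffnessSection(
                name='Stiff-1', stiffnessMatrix=stiffness)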
"""
self.sections[name] = section = GeneralStiffnessSection(name, stiffnessMatrix, referenceTemperature,
applyThermalStress, temperatureDependency, dependencies,
poissonDefinition, poisson, useDensity, density,
thermalStresses, scalingData)
return section
def HomogeneousShellSection(self, name: str, material: str, thickness: float = 0, numIntPts: int = 5,
thicknessType: SymbolicConstant = UNIFORM, preIntegrate: Boolean = OFF,
poissonDefinition: SymbolicConstant = DEFAULT, poisson: float = 0,
integrationRule: SymbolicConstant = SIMPSON, temperature: SymbolicConstant = GRADIENT,
idealization: SymbolicConstant = NO_IDEALIZATION, nTemp: int = None,
thicknessModulus: float = None, useDensity: Boolean = OFF, density: float = 0,
thicknessField: str = '', nodalThicknessField: str = '') -> HomogeneousShellSection:
"""This method creates a HomogeneousShellSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
            mdb.models[name].parts[name].compositeLayups[i].HomogeneousShellSection
mdb.models[name].HomogeneousShellSection
session.odbs[name].HomogeneousShellSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the section material.
thickness
A Float specifying the thickness of the section. The *thickness* argument applies only
when *thicknessType*=UNIFORM. The default value is 0.0.
numIntPts
An Int specifying the number of integration points to be used through the section.
            Possible values are *numIntPts* > 0. The default value is 5. To use the default settings
of the analysis products, set *numIntPts* to 5 if *integrationRule*=SIMPSON or set
*numIntPts* to 7 if *integrationRule*=GAUSS.
thicknessType
A SymbolicConstant specifying the distribution used for defining the thickness of the
elements. Possible values are UNIFORM, ANALYTICAL_FIELD, DISCRETE_FIELD,
NODAL_ANALYTICAL_FIELD, and NODAL_DISCRETE_FIELD. The default value is UNIFORM.
preIntegrate
A Boolean specifying whether the shell section properties are specified by the user
prior to the analysis (ON) or integrated during the analysis (OFF). The default value is
OFF.
poissonDefinition
A SymbolicConstant specifying whether to use the default value for the Poisson's ratio.
            Possible values are: DEFAULT, specifying that the default value for the Poisson's
            ratio is 0.5 in an Abaqus/Standard analysis and is obtained from the material
            definition in an Abaqus/Explicit analysis; and VALUE, specifying that the Poisson's
            ratio used in the analysis is the value provided in *poisson*. The default value is
            DEFAULT.
poisson
            A Float specifying the Poisson's ratio. Possible values are −1.0 ≤ *poisson* ≤ 0.5.
This argument is valid only when *poissonDefinition*=VALUE. The default value is 0.5.
integrationRule
A SymbolicConstant specifying the shell section integration rule. Possible values are
SIMPSON and GAUSS. The default value is SIMPSON.
temperature
A SymbolicConstant specifying the mode used for temperature and field variable input
across the section thickness. Possible values are GRADIENT and POINTWISE. The default
value is GRADIENT.
idealization
A SymbolicConstant specifying the mechanical idealization used for the section
calculations. This member is only applicable when *preIntegrate* is set to ON. Possible
values are NO_IDEALIZATION, SMEAR_ALL_LAYERS, MEMBRANE, and BENDING. The default value
is NO_IDEALIZATION.
nTemp
None or an Int specifying the number of temperature points to be input. This argument is
valid only when *temperature*=POINTWISE. The default value is None.
thicknessModulus
None or a Float specifying the effective thickness modulus. This argument is relevant
only for continuum shells and must be used in conjunction with the argument *poisson*.
The default value is None.
useDensity
A Boolean specifying whether or not to use the value of *density*. The default value is
OFF.
density
A Float specifying the value of density to apply to this section. The default value is
0.0.
thicknessField
A String specifying the name of the AnalyticalField or DiscreteField object used to
define the thickness of the shell elements. The *thicknessField* argument applies only
when *thicknessType*=ANALYTICAL_FIELD or *thicknessType*=DISCRETE_FIELD. The default
value is an empty string.
nodalThicknessField
A String specifying the name of the AnalyticalField or DiscreteField object used to
define the thickness of the shell elements at each node. The *nodalThicknessField*
argument applies only when *thicknessType*=NODAL_ANALYTICAL_FIELD or
*thicknessType*=NODAL_DISCRETE_FIELD. The default value is an empty string.
Returns
-------
A HomogeneousShellSection object.
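        Examples
        --------
        A minimal usage sketch, illustrative only; the model, section and material names are
        assumptions:
        .. code-block:: python
            section = mdb.models['Model-1'].HomogeneousShellSection(
                name='Shell-1', material='Steel', thickness=0.005, numIntPts=5)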
"""
self.sections[name] = section = HomogeneousShellSection(name, material, thickness, numIntPts, thicknessType,
preIntegrate, poissonDefinition, poisson,
integrationRule, temperature, idealization, nTemp,
thicknessModulus, useDensity, density, thicknessField,
nodalThicknessField)
return section
def HomogeneousSolidSection(self, name: str, material: str, thickness: float = 1) -> HomogeneousSolidSection:
"""This method creates a HomogeneousSolidSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].HomogeneousSolidSection
session.odbs[name].HomogeneousSolidSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material.
thickness
A Float specifying the thickness of the section. Possible values are None or greater
than zero. The default value is 1.0.
Returns
-------
A HomogeneousSolidSection object.
Raises
------
InvalidNameError
RangeError
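        Examples
        --------
        A minimal usage sketch, illustrative only; the model, section and material names are
        assumptions:
        .. code-block:: python
            section = mdb.models['Model-1'].HomogeneousSolidSection(
                name='Solid-1', material='Steel', thickness=1.0)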
"""
self.sections[name] = section = HomogeneousSolidSection(name, material, thickness)
return section
def MembraneSection(self, name: str, material: str, thickness: float = 1,
thicknessType: SymbolicConstant = UNIFORM,
poissonDefinition: SymbolicConstant = DEFAULT, poisson: float = 0,
thicknessField: str = '') -> MembraneSection:
"""This method creates a MembraneSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].MembraneSection
session.odbs[name].MembraneSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material.
thickness
            A Float specifying the thickness for the section. Possible values are *thickness* >
0.0. The default value is 1.0.
thicknessType
A SymbolicConstant specifying the distribution used for defining the thickness of the
elements. Possible values are UNIFORM, ANALYTICAL_FIELD, and DISCRETE_FIELD. The default
value is UNIFORM.
poissonDefinition
A SymbolicConstant specifying whether to use the default value for the Poisson's ratio.
            Possible values are: DEFAULT, specifying that the default value for the Poisson's
            ratio is 0.5 in an Abaqus/Standard analysis and is obtained from the material
            definition in an Abaqus/Explicit analysis; and VALUE, specifying that the Poisson's
            ratio used in the analysis is the value provided in *poisson*. The default value is
            DEFAULT.
poisson
            A Float specifying the section Poisson's ratio. Possible values are −1.0 ≤ *poisson* ≤
0.5. This argument is valid only when *poissonDefinition*=VALUE. The default value is
0.5.
thicknessField
A String specifying the name of the AnalyticalField or DiscreteField object used to
define the thickness of the shell elements. The *thicknessField* argument applies only
when *thicknessType*=ANALYTICAL_FIELD or *thicknessType*=DISCRETE_FIELD. The default
value is an empty string.
Returns
-------
A MembraneSection object.
Raises
------
RangeError and InvalidNameError.
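        Examples
        --------
        A minimal usage sketch, illustrative only; the model, section and material names are
        assumptions:
        .. code-block:: python
            section = mdb.models['Model-1'].MembraneSection(
                name='Membrane-1', material='Rubber', thickness=0.001)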
"""
self.sections[name] = section = MembraneSection(name, material, thickness, thicknessType, poissonDefinition,
poisson, thicknessField)
return section
def MPCSection(self, name: str, mpcType: SymbolicConstant, userMode: SymbolicConstant = DOF_MODE,
userType: int = 0) -> MPCSection:
"""This method creates a MPCSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].MPCSection
session.odbs[name].MPCSection
Parameters
----------
name
A String specifying the repository key.
mpcType
A SymbolicConstant specifying the MPC type of the section. Possible values are BEAM_MPC,
ELBOW_MPC, PIN_MPC, LINK_MPC, TIE_MPC, and USER_DEFINED.
userMode
A SymbolicConstant specifying the mode of the MPC when it is user-defined. Possible
            values are DOF_MODE and NODE_MODE. The default value is DOF_MODE. The *userMode* argument
applies only when *mpcType*=USER_DEFINED.
userType
An Int specifying to differentiate between different constraint types in a user-defined
            MPCSection. The default value is 0. The *userType* argument applies only when
*mpcType*=USER_DEFINED.
Returns
-------
A MPCSection object.
Raises
------
RangeError and InvalidNameError.
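        Examples
        --------
        A minimal usage sketch, illustrative only; the model and section names are assumptions,
        and PIN_MPC is the symbolic constant imported from abaqusConstants:
        .. code-block:: python
            section = mdb.models['Model-1'].MPCSection(name='MPC-1', mpcType=PIN_MPC)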
"""
self.sections[name] = section = MPCSection(name, mpcType, userMode, userType)
return section
def PEGSection(self, name: str, material: str, thickness: float = 1, wedgeAngle1: float = 0,
wedgeAngle2: float = 0) -> PEGSection:
"""This method creates a PEGSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].PEGSection
session.odbs[name].PEGSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material.
thickness
            A Float specifying the thickness of the section. Possible values are *thickness* > 0.0.
The default value is 1.0.
wedgeAngle1
A Float specifying the value of the x component of the angle between the bounding
            planes, Δϕx. The default value is 0.0.
wedgeAngle2
A Float specifying the value of the y component of the angle between the bounding
            planes, Δϕy. The default value is 0.0.
Returns
-------
A PEGSection object.
Raises
------
InvalidNameError
RangeError
"""
self.sections[name] = section = PEGSection(name, material, thickness, wedgeAngle1, wedgeAngle2)
return section
def SurfaceSection(self, name: str, useDensity: Boolean = OFF, density: float = 0) -> SurfaceSection:
"""This method creates a SurfaceSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SurfaceSection
session.odbs[name].SurfaceSection
Parameters
----------
name
A String specifying the repository key.
useDensity
A Boolean specifying whether or not to use the value of *density*. The default value is
OFF.
density
A Float specifying the value of density to apply to this section. The default value is
0.0.
Returns
-------
A SurfaceSection object.
Raises
------
RangeError and InvalidNameError.
"""
self.sections[name] = section = SurfaceSection(name, useDensity, density)
return section
def TrussSection(self, name: str, material: str, area: float = 1) -> TrussSection:
"""This method creates a TrussSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].TrussSection
session.odbs[name].TrussSection
Parameters
----------
name
A String specifying the repository key.
material
A String specifying the name of the material.
area
A Float specifying the cross-sectional area for the section. Possible values are *area*
            > 0. The default value is 1.0.
Returns
-------
A TrussSection object.
Raises
------
RangeError and InvalidNameError.
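        Examples
        --------
        A minimal usage sketch, illustrative only; the model, section and material names and the
        area value are assumptions:
        .. code-block:: python
            section = mdb.models['Model-1'].TrussSection(
                name='Truss-1', material='Steel', area=1.0e-4)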
"""
self.sections[name] = section = TrussSection(name, material, area)
return section
| 49.2199
| 158
| 0.62083
|
94aaed640738f8db6e312a440af63926dec40bcb
| 1,391
|
py
|
Python
|
cengal/time_management/load_best_timer/versions/v_0/load_best_timer.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | 3
|
2018-07-23T18:48:58.000Z
|
2021-07-18T14:17:20.000Z
|
cengal/time_management/load_best_timer/versions/v_0/load_best_timer.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
cengal/time_management/load_best_timer/versions/v_0/load_best_timer.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2017 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import clock
perf_counter = process_time = clock
try:
from time import perf_counter
except ImportError:
pass
try:
from time import process_time
except ImportError:
pass
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2017 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "0.0.1"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
__status__ = "Prototype"
# __status__ = "Development"
# __status__ = "Production"
| 30.23913
| 100
| 0.74982
|
99e7f0fcd3acb9395c834faae1c6969af1bb7b1a
| 39,791
|
py
|
Python
|
src/python/grpcio/grpc/aio/_interceptor.py
|
bostikforever/grpc
|
aae25e24432f733ed4c5f90287845af0113775ee
|
[
"Apache-2.0"
] | 3
|
2017-04-09T06:20:03.000Z
|
2021-06-07T15:13:43.000Z
|
src/python/grpcio/grpc/aio/_interceptor.py
|
bostikforever/grpc
|
aae25e24432f733ed4c5f90287845af0113775ee
|
[
"Apache-2.0"
] | 18
|
2016-11-22T16:35:23.000Z
|
2021-10-15T10:24:14.000Z
|
src/python/grpcio/grpc/aio/_interceptor.py
|
bostikforever/grpc
|
aae25e24432f733ed4c5f90287845af0113775ee
|
[
"Apache-2.0"
] | 2
|
2017-10-11T05:01:29.000Z
|
2019-10-10T17:14:54.000Z
|
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interceptors implementation of gRPC Asyncio Python."""
from abc import ABCMeta
from abc import abstractmethod
import asyncio
import collections
import functools
from typing import (AsyncIterable, Awaitable, Callable, Iterator, Optional,
Sequence, Union)
import grpc
from grpc._cython import cygrpc
from . import _base_call
from ._call import AioRpcError
from ._call import StreamStreamCall
from ._call import StreamUnaryCall
from ._call import UnaryStreamCall
from ._call import UnaryUnaryCall
from ._call import _API_STYLE_ERROR
from ._call import _RPC_ALREADY_FINISHED_DETAILS
from ._call import _RPC_HALF_CLOSED_DETAILS
from ._metadata import Metadata
from ._typing import DeserializingFunction
from ._typing import DoneCallbackType
from ._typing import RequestIterableType
from ._typing import RequestType
from ._typing import ResponseIterableType
from ._typing import ResponseType
from ._typing import SerializingFunction
from ._utils import _timeout_to_deadline
_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
class ServerInterceptor(metaclass=ABCMeta):
"""Affords intercepting incoming RPCs on the service-side.
This is an EXPERIMENTAL API.
"""
@abstractmethod
async def intercept_service(
self, continuation: Callable[[grpc.HandlerCallDetails],
Awaitable[grpc.RpcMethodHandler]],
handler_call_details: grpc.HandlerCallDetails
) -> grpc.RpcMethodHandler:
"""Intercepts incoming RPCs before handing them over to a handler.
Args:
continuation: A function that takes a HandlerCallDetails and
proceeds to invoke the next interceptor in the chain, if any,
or the RPC handler lookup logic, with the call details passed
as an argument, and returns an RpcMethodHandler instance if
the RPC is considered serviced, or None otherwise.
handler_call_details: A HandlerCallDetails describing the RPC.
Returns:
An RpcMethodHandler with which the RPC may be serviced if the
interceptor chooses to service this RPC, or None otherwise.
"""
class ClientCallDetails(
collections.namedtuple(
'ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')),
grpc.ClientCallDetails):
"""Describes an RPC to be invoked.
This is an EXPERIMENTAL API.
Args:
method: The method name of the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional metadata to be transmitted to the service-side of
the RPC.
credentials: An optional CallCredentials for the RPC.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable :term:`wait_for_ready` mechanism.
"""
method: str
timeout: Optional[float]
metadata: Optional[Metadata]
credentials: Optional[grpc.CallCredentials]
wait_for_ready: Optional[bool]
class ClientInterceptor(metaclass=ABCMeta):
"""Base class used for all Aio Client Interceptor classes"""
class UnaryUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting unary-unary invocations."""
@abstractmethod
async def intercept_unary_unary(
self, continuation: Callable[[ClientCallDetails, RequestType],
UnaryUnaryCall],
client_call_details: ClientCallDetails,
request: RequestType) -> Union[UnaryUnaryCall, ResponseType]:
"""Intercepts a unary-unary invocation asynchronously.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request: The request value for the RPC.
Returns:
An object with the RPC response.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class UnaryStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting unary-stream invocations."""
@abstractmethod
async def intercept_unary_stream(
self, continuation: Callable[[ClientCallDetails, RequestType],
UnaryStreamCall],
client_call_details: ClientCallDetails, request: RequestType
) -> Union[ResponseIterableType, UnaryStreamCall]:
"""Intercepts a unary-stream invocation asynchronously.
        The function may return either the call object or an asynchronous
        iterator; if an asynchronous iterator is returned, it becomes the
        source of the reads done by the caller.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request: The request value for the RPC.
Returns:
The RPC Call or an asynchronous iterator.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class StreamUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting stream-unary invocations."""
@abstractmethod
async def intercept_stream_unary(
self,
continuation: Callable[[ClientCallDetails, RequestType],
StreamUnaryCall],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType,
) -> StreamUnaryCall:
"""Intercepts a stream-unary invocation asynchronously.
        Within the interceptor, the call methods like `write`, or even
        awaiting the call itself, should be used carefully, since the caller
        could be expecting an untouched call, for example in order to start
        writing messages to it.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request_iterator: The request iterator that will produce requests
for the RPC.
Returns:
The RPC Call.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class StreamStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
"""Affords intercepting stream-stream invocations."""
@abstractmethod
async def intercept_stream_stream(
self,
continuation: Callable[[ClientCallDetails, RequestType],
StreamStreamCall],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType,
) -> Union[ResponseIterableType, StreamStreamCall]:
"""Intercepts a stream-stream invocation asynchronously.
        Within the interceptor, the call methods like `write`, or even
        awaiting the call itself, should be used carefully, since the caller
        could be expecting an untouched call, for example in order to start
        writing messages to it.
        The function may return either the call object or an asynchronous
        iterator; if an asynchronous iterator is returned, it becomes the
        source of the reads done by the caller.
Args:
continuation: A coroutine that proceeds with the invocation by
executing the next interceptor in the chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
`call = await continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns the call to the
RPC.
client_call_details: A ClientCallDetails object describing the
outgoing RPC.
request_iterator: The request iterator that will produce requests
for the RPC.
Returns:
The RPC Call or an asynchronous iterator.
Raises:
AioRpcError: Indicating that the RPC terminated with non-OK status.
asyncio.CancelledError: Indicating that the RPC was canceled.
"""
class InterceptedCall:
"""Base implementation for all intercepted call arities.
Interceptors might have some work to do before the RPC invocation with
the capacity of changing the invocation parameters, and some work to do
after the RPC invocation with the capacity for accessing to the wrapped
`UnaryUnaryCall`.
    It also handles early and late cancellations: when the RPC has not even
    started and execution is still held by the interceptors, or when the
    RPC has already finished but execution is, again, still held by the
    interceptors.
    Once the RPC is finally executed, all methods are delegated to the
    intercepted call, which is the same call object that was returned to
    the interceptors.
    As the base class for all intercepted call arities, it implements the
    logic around final status, metadata and cancellation.
"""
_interceptors_task: asyncio.Task
_pending_add_done_callbacks: Sequence[DoneCallbackType]
def __init__(self, interceptors_task: asyncio.Task) -> None:
self._interceptors_task = interceptors_task
self._pending_add_done_callbacks = []
self._interceptors_task.add_done_callback(
self._fire_or_add_pending_done_callbacks)
def __del__(self):
self.cancel()
def _fire_or_add_pending_done_callbacks(
self, interceptors_task: asyncio.Task) -> None:
if not self._pending_add_done_callbacks:
return
call_completed = False
try:
call = interceptors_task.result()
if call.done():
call_completed = True
except (AioRpcError, asyncio.CancelledError):
call_completed = True
if call_completed:
for callback in self._pending_add_done_callbacks:
callback(self)
else:
for callback in self._pending_add_done_callbacks:
callback = functools.partial(self._wrap_add_done_callback,
callback)
call.add_done_callback(callback)
self._pending_add_done_callbacks = []
def _wrap_add_done_callback(self, callback: DoneCallbackType,
unused_call: _base_call.Call) -> None:
callback(self)
def cancel(self) -> bool:
if not self._interceptors_task.done():
            # The intercepted call is not yet available; try to cancel
            # it by using the generic asyncio cancellation mechanism on
            # the interceptors task.
return self._interceptors_task.cancel()
try:
call = self._interceptors_task.result()
except AioRpcError:
return False
except asyncio.CancelledError:
return False
return call.cancel()
def cancelled(self) -> bool:
if not self._interceptors_task.done():
return False
try:
call = self._interceptors_task.result()
except AioRpcError as err:
return err.code() == grpc.StatusCode.CANCELLED
except asyncio.CancelledError:
return True
return call.cancelled()
def done(self) -> bool:
if not self._interceptors_task.done():
return False
try:
call = self._interceptors_task.result()
except (AioRpcError, asyncio.CancelledError):
return True
return call.done()
def add_done_callback(self, callback: DoneCallbackType) -> None:
if not self._interceptors_task.done():
self._pending_add_done_callbacks.append(callback)
return
try:
call = self._interceptors_task.result()
except (AioRpcError, asyncio.CancelledError):
callback(self)
return
if call.done():
callback(self)
else:
callback = functools.partial(self._wrap_add_done_callback, callback)
call.add_done_callback(callback)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
async def initial_metadata(self) -> Optional[Metadata]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.initial_metadata()
except asyncio.CancelledError:
return None
return await call.initial_metadata()
async def trailing_metadata(self) -> Optional[Metadata]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.trailing_metadata()
except asyncio.CancelledError:
return None
return await call.trailing_metadata()
async def code(self) -> grpc.StatusCode:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.code()
except asyncio.CancelledError:
return grpc.StatusCode.CANCELLED
return await call.code()
async def details(self) -> str:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.details()
except asyncio.CancelledError:
return _LOCAL_CANCELLATION_DETAILS
return await call.details()
async def debug_error_string(self) -> Optional[str]:
try:
call = await self._interceptors_task
except AioRpcError as err:
return err.debug_error_string()
except asyncio.CancelledError:
return ''
return await call.debug_error_string()
async def wait_for_connection(self) -> None:
call = await self._interceptors_task
return await call.wait_for_connection()
class _InterceptedUnaryResponseMixin:
def __await__(self):
call = yield from self._interceptors_task.__await__()
response = yield from call.__await__()
return response
class _InterceptedStreamResponseMixin:
_response_aiter: Optional[AsyncIterable[ResponseType]]
def _init_stream_response_mixin(self) -> None:
        # Initialized lazily; otherwise, if the iterator is never
        # consumed, asyncio emits a logging warning.
self._response_aiter = None
async def _wait_for_interceptor_task_response_iterator(
self) -> ResponseType:
call = await self._interceptors_task
async for response in call:
yield response
def __aiter__(self) -> AsyncIterable[ResponseType]:
if self._response_aiter is None:
self._response_aiter = self._wait_for_interceptor_task_response_iterator(
)
return self._response_aiter
async def read(self) -> ResponseType:
if self._response_aiter is None:
self._response_aiter = self._wait_for_interceptor_task_response_iterator(
)
return await self._response_aiter.asend(None)
class _InterceptedStreamRequestMixin:
_write_to_iterator_async_gen: Optional[AsyncIterable[RequestType]]
_write_to_iterator_queue: Optional[asyncio.Queue]
_FINISH_ITERATOR_SENTINEL = object()
def _init_stream_request_mixin(
self, request_iterator: Optional[RequestIterableType]
) -> RequestIterableType:
if request_iterator is None:
# We provide our own request iterator which is a proxy
            # of the future writes that will be done by the caller.
self._write_to_iterator_queue = asyncio.Queue(maxsize=1)
self._write_to_iterator_async_gen = self._proxy_writes_as_request_iterator(
)
request_iterator = self._write_to_iterator_async_gen
else:
self._write_to_iterator_queue = None
return request_iterator
async def _proxy_writes_as_request_iterator(self):
await self._interceptors_task
while True:
value = await self._write_to_iterator_queue.get()
if value is _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL:
break
yield value
async def write(self, request: RequestType) -> None:
# If no queue was created it means that requests
        # should be expected through an iterator provided
# by the caller.
if self._write_to_iterator_queue is None:
raise cygrpc.UsageError(_API_STYLE_ERROR)
try:
call = await self._interceptors_task
except (asyncio.CancelledError, AioRpcError):
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
if call.done():
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
elif call._done_writing_flag:
raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
        # The write might never complete since the call could finish
        # abruptly; we give up on the first awaitable object that finishes.
_, _ = await asyncio.wait(
(self._loop.create_task(self._write_to_iterator_queue.put(request)),
self._loop.create_task(call.code())),
return_when=asyncio.FIRST_COMPLETED)
if call.done():
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
async def done_writing(self) -> None:
"""Signal peer that client is done writing.
This method is idempotent.
"""
# If no queue was created it means that requests
        # should be expected through an iterator provided
# by the caller.
if self._write_to_iterator_queue is None:
raise cygrpc.UsageError(_API_STYLE_ERROR)
try:
call = await self._interceptors_task
except asyncio.CancelledError:
raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
        # The write might never complete since the call could finish
        # abruptly; we give up on the first awaitable object that finishes.
_, _ = await asyncio.wait((self._write_to_iterator_queue.put(
_InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL),
call.code()),
return_when=asyncio.FIRST_COMPLETED)
class InterceptedUnaryUnaryCall(_InterceptedUnaryResponseMixin, InterceptedCall,
_base_call.UnaryUnaryCall):
"""Used for running a `UnaryUnaryCall` wrapped by interceptors.
    The `__await__` method is proxied to the intercepted call only once the
    interceptors task has finished.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[UnaryUnaryClientInterceptor],
request: RequestType, timeout: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[UnaryUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], request: RequestType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> UnaryUnaryCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryUnaryClientInterceptor],
client_call_details: ClientCallDetails,
request: RequestType) -> _base_call.UnaryUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response = await interceptor.intercept_unary_unary(
continuation, client_call_details, request)
if isinstance(call_or_response, _base_call.UnaryUnaryCall):
return call_or_response
else:
return UnaryUnaryCallResponse(call_or_response)
else:
return UnaryUnaryCall(
request, _timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedUnaryStreamCall(_InterceptedStreamResponseMixin,
InterceptedCall, _base_call.UnaryStreamCall):
"""Used for running a `UnaryStreamCall` wrapped by interceptors."""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
    _last_returned_call_from_interceptors: Optional[_base_call.UnaryStreamCall]
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[UnaryStreamClientInterceptor],
request: RequestType, timeout: Optional[float],
metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
self._init_stream_response_mixin()
self._last_returned_call_from_interceptors = None
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[UnaryUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], request: RequestType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> UnaryStreamCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryStreamClientInterceptor],
client_call_details: ClientCallDetails,
request: RequestType,
) -> _base_call.UnaryUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response_iterator = await interceptor.intercept_unary_stream(
continuation, client_call_details, request)
if isinstance(call_or_response_iterator,
_base_call.UnaryStreamCall):
self._last_returned_call_from_interceptors = call_or_response_iterator
else:
self._last_returned_call_from_interceptors = UnaryStreamCallResponseIterator(
self._last_returned_call_from_interceptors,
call_or_response_iterator)
return self._last_returned_call_from_interceptors
else:
self._last_returned_call_from_interceptors = UnaryStreamCall(
request, _timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
return self._last_returned_call_from_interceptors
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedStreamUnaryCall(_InterceptedUnaryResponseMixin,
_InterceptedStreamRequestMixin,
InterceptedCall, _base_call.StreamUnaryCall):
"""Used for running a `StreamUnaryCall` wrapped by interceptors.
    The `__await__` method is proxied to the intercepted call only once the
    interceptors task has finished.
"""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[StreamUnaryClientInterceptor],
request_iterator: Optional[RequestIterableType],
timeout: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
request_iterator = self._init_stream_request_mixin(request_iterator)
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request_iterator, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[StreamUnaryClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool],
request_iterator: RequestIterableType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> StreamUnaryCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[UnaryUnaryClientInterceptor],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType
) -> _base_call.StreamUnaryCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
return await interceptor.intercept_stream_unary(
continuation, client_call_details, request_iterator)
else:
return StreamUnaryCall(
request_iterator,
_timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request_iterator)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class InterceptedStreamStreamCall(_InterceptedStreamResponseMixin,
_InterceptedStreamRequestMixin,
InterceptedCall, _base_call.StreamStreamCall):
"""Used for running a `StreamStreamCall` wrapped by interceptors."""
_loop: asyncio.AbstractEventLoop
_channel: cygrpc.AioChannel
    _last_returned_call_from_interceptors: Optional[_base_call.StreamStreamCall]
# pylint: disable=too-many-arguments
def __init__(self, interceptors: Sequence[StreamStreamClientInterceptor],
request_iterator: Optional[RequestIterableType],
timeout: Optional[float], metadata: Metadata,
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
method: bytes, request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction,
loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._channel = channel
self._init_stream_response_mixin()
request_iterator = self._init_stream_request_mixin(request_iterator)
self._last_returned_call_from_interceptors = None
interceptors_task = loop.create_task(
self._invoke(interceptors, method, timeout, metadata, credentials,
wait_for_ready, request_iterator, request_serializer,
response_deserializer))
super().__init__(interceptors_task)
# pylint: disable=too-many-arguments
async def _invoke(
self, interceptors: Sequence[StreamStreamClientInterceptor],
method: bytes, timeout: Optional[float],
metadata: Optional[Metadata],
credentials: Optional[grpc.CallCredentials],
wait_for_ready: Optional[bool],
request_iterator: RequestIterableType,
request_serializer: SerializingFunction,
response_deserializer: DeserializingFunction) -> StreamStreamCall:
"""Run the RPC call wrapped in interceptors"""
async def _run_interceptor(
interceptors: Iterator[StreamStreamClientInterceptor],
client_call_details: ClientCallDetails,
request_iterator: RequestIterableType
) -> _base_call.StreamStreamCall:
interceptor = next(interceptors, None)
if interceptor:
continuation = functools.partial(_run_interceptor, interceptors)
call_or_response_iterator = await interceptor.intercept_stream_stream(
continuation, client_call_details, request_iterator)
if isinstance(call_or_response_iterator,
_base_call.StreamStreamCall):
self._last_returned_call_from_interceptors = call_or_response_iterator
else:
self._last_returned_call_from_interceptors = StreamStreamCallResponseIterator(
self._last_returned_call_from_interceptors,
call_or_response_iterator)
return self._last_returned_call_from_interceptors
else:
self._last_returned_call_from_interceptors = StreamStreamCall(
request_iterator,
_timeout_to_deadline(client_call_details.timeout),
client_call_details.metadata,
client_call_details.credentials,
client_call_details.wait_for_ready, self._channel,
client_call_details.method, request_serializer,
response_deserializer, self._loop)
return self._last_returned_call_from_interceptors
client_call_details = ClientCallDetails(method, timeout, metadata,
credentials, wait_for_ready)
return await _run_interceptor(iter(interceptors), client_call_details,
request_iterator)
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
"""Final UnaryUnaryCall class finished with a response."""
_response: ResponseType
def __init__(self, response: ResponseType) -> None:
self._response = response
def cancel(self) -> bool:
return False
def cancelled(self) -> bool:
return False
def done(self) -> bool:
return True
def add_done_callback(self, unused_callback) -> None:
raise NotImplementedError()
def time_remaining(self) -> Optional[float]:
raise NotImplementedError()
async def initial_metadata(self) -> Optional[Metadata]:
return None
async def trailing_metadata(self) -> Optional[Metadata]:
return None
async def code(self) -> grpc.StatusCode:
return grpc.StatusCode.OK
async def details(self) -> str:
return ''
async def debug_error_string(self) -> Optional[str]:
return None
def __await__(self):
if False: # pylint: disable=using-constant-test
# This code path is never used, but a yield statement is needed
# for telling the interpreter that __await__ is a generator.
yield None
return self._response
async def wait_for_connection(self) -> None:
pass
class _StreamCallResponseIterator:
_call: Union[_base_call.UnaryStreamCall, _base_call.StreamStreamCall]
_response_iterator: AsyncIterable[ResponseType]
def __init__(self, call: Union[_base_call.UnaryStreamCall,
_base_call.StreamStreamCall],
response_iterator: AsyncIterable[ResponseType]) -> None:
self._response_iterator = response_iterator
self._call = call
def cancel(self) -> bool:
return self._call.cancel()
def cancelled(self) -> bool:
return self._call.cancelled()
def done(self) -> bool:
return self._call.done()
def add_done_callback(self, callback) -> None:
self._call.add_done_callback(callback)
def time_remaining(self) -> Optional[float]:
return self._call.time_remaining()
async def initial_metadata(self) -> Optional[Metadata]:
return await self._call.initial_metadata()
async def trailing_metadata(self) -> Optional[Metadata]:
return await self._call.trailing_metadata()
async def code(self) -> grpc.StatusCode:
return await self._call.code()
async def details(self) -> str:
return await self._call.details()
async def debug_error_string(self) -> Optional[str]:
return await self._call.debug_error_string()
def __aiter__(self):
return self._response_iterator.__aiter__()
async def wait_for_connection(self) -> None:
return await self._call.wait_for_connection()
class UnaryStreamCallResponseIterator(_StreamCallResponseIterator,
_base_call.UnaryStreamCall):
"""UnaryStreamCall class wich uses an alternative response iterator."""
async def read(self) -> ResponseType:
        # Behind the scenes everything goes through the
# async iterator. So this path should not be reached.
raise NotImplementedError()
class StreamStreamCallResponseIterator(_StreamCallResponseIterator,
_base_call.StreamStreamCall):
"""StreamStreamCall class wich uses an alternative response iterator."""
async def read(self) -> ResponseType:
        # Behind the scenes everything goes through the
# async iterator. So this path should not be reached.
raise NotImplementedError()
async def write(self, request: RequestType) -> None:
        # Behind the scenes everything goes through the
# async iterator provided by the InterceptedStreamStreamCall.
# So this path should not be reached.
raise NotImplementedError()
async def done_writing(self) -> None:
        # Behind the scenes everything goes through the
# async iterator provided by the InterceptedStreamStreamCall.
# So this path should not be reached.
raise NotImplementedError()
@property
def _done_writing_flag(self) -> bool:
return self._call._done_writing_flag
| 39.830831
| 98
| 0.658591
|
a1e337669acc0380df9fa58693d107125a892d1a
| 2,210
|
py
|
Python
|
medium/shadows-of-the-knight-episode-1.py
|
jraximus/codingame-solutions
|
2e4931cc8fe21e3be71dbe0d61722af416757d6b
|
[
"MIT"
] | null | null | null |
medium/shadows-of-the-knight-episode-1.py
|
jraximus/codingame-solutions
|
2e4931cc8fe21e3be71dbe0d61722af416757d6b
|
[
"MIT"
] | null | null | null |
medium/shadows-of-the-knight-episode-1.py
|
jraximus/codingame-solutions
|
2e4931cc8fe21e3be71dbe0d61722af416757d6b
|
[
"MIT"
] | null | null | null |
#https://www.codingame.com/training/medium/shadows-of-the-knight-episode-1
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
debug = False
# w: width of the building.
# h: height of the building.
w, h = [int(i) for i in raw_input().split()]
n = int(raw_input()) # maximum number of turns before game over.
x0, y0 = [int(i) for i in raw_input().split()]
px0 = x0
py0 = y0
x1 = x0
y1 = y0
x2 = None
y2 = None
prev_dir = None
if debug:
print >> sys.stderr, "width: " + str(w) + " height: " + str(h)
print >> sys.stderr, "# of total jumps: " + str(n)
# game loop
while True:
bomb_dir = raw_input() # the direction of the bombs from batman's current location (U, UR, R, DR, D, DL, L or UL)
if debug:
print >> sys.stderr, "bomb_dir: " + str(bomb_dir)
x_deviation = 0
y_deviation = 0
if "U" in bomb_dir:
if y2 == None:
y2 = 0
if prev_dir !=None and "D" in prev_dir:
y2 = py0
y_deviation = int(abs(round((y2 - y1)/2))) * -1
#y_deviation = -1
elif "D" in bomb_dir:
if y2 == None:
y2 = h
if prev_dir !=None and "U" in prev_dir:
y2 = py0
y_deviation = int(abs(round((y2 - y1)/2)))
#y_deviation = 1
if "L" in bomb_dir:
if x2 == None:
x2 = 0
if prev_dir !=None and "R" in prev_dir:
x2 = px0
x_deviation = int(abs(round((x2 - x1)/2))) * -1
#x_deviation = -1
elif "R" in bomb_dir:
if x2 == None:
x2 = w
if prev_dir !=None and "L" in prev_dir:
x2 = px0
x_deviation = int(abs(round((x2 - x1)/2)))
#x_deviation = 1
if debug:
print >> sys.stderr, "(x0,y0) - (" + str(x0) + "," + str(y0) + ")"
print >> sys.stderr, "(x1,y1) - (" + str(x0 + x_deviation) + "," + str(y0 + y_deviation) + ")"
# state saving current jumps for next step
px0 = x0
py0 = y0
prev_dir = bomb_dir
x0 = x0 + x_deviation
y0 = y0 + y_deviation
x1 = x0
y1 = y0
print str(x0) + " " + str(y0)
| 26
| 118
| 0.533032
|
902131b0ca121204667099fe0aa173a5c8774875
| 2,538
|
py
|
Python
|
tests/conftest.py
|
chrishas35/fava
|
8a273fa4b92ab6df875209ad8ea83c30984ce0f4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
chrishas35/fava
|
8a273fa4b92ab6df875209ad8ea83c30984ce0f4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
chrishas35/fava
|
8a273fa4b92ab6df875209ad8ea83c30984ce0f4
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring
import os
from pathlib import Path
from pprint import pformat
import pytest
from beancount.loader import load_string
from fava.core import FavaLedger
from fava.application import _load_file, app as fava_app
from fava.core.budgets import parse_budgets
def create_app(bfile):
key = "BEANCOUNT_FILES"
if (key not in fava_app.config) or (fava_app.config[key] != [bfile]):
fava_app.config[key] = [bfile]
_load_file()
def data_file(filename):
return os.path.join(os.path.dirname(__file__), "data", filename)
EXAMPLE_FILE = data_file("long-example.beancount")
EXTENSION_REPORT_EXAMPLE_FILE = data_file("extension-report-example.beancount")
API = FavaLedger(EXAMPLE_FILE)
fava_app.testing = True
TEST_CLIENT = fava_app.test_client()
create_app(EXAMPLE_FILE)
SNAPSHOT_UPDATE = bool(os.environ.get("SNAPSHOT_UPDATE"))
MSG = "Maybe snapshots need to be updated with `SNAPSHOT_UPDATE=1 make test`?"
@pytest.fixture
def snapshot(request):
file_path = Path(request.fspath)
fn_name = request.function.__name__
snap_dir = file_path.parent / "__snapshots__"
if not snap_dir.exists():
snap_dir.mkdir()
def _snapshot_data(data, item=None):
snap_file = (
snap_dir / f"{file_path.name}-{fn_name}-{item}"
if item
else snap_dir / f"{file_path.name}-{fn_name}"
)
out = pformat(data)
if not snap_file.exists():
contents = ""
else:
contents = open(snap_file).read()
if SNAPSHOT_UPDATE:
open(snap_file, "w").write(out)
return
assert out == contents, MSG
return _snapshot_data
@pytest.fixture
def extension_report_app():
create_app(EXTENSION_REPORT_EXAMPLE_FILE)
return fava_app
@pytest.fixture
def app():
create_app(EXAMPLE_FILE)
return fava_app
@pytest.fixture
def test_client():
return TEST_CLIENT
@pytest.fixture
def load_doc(request):
return load_string(request.function.__doc__, dedent=True)
@pytest.fixture
def extension_report_ledger():
return FavaLedger(EXTENSION_REPORT_EXAMPLE_FILE)
@pytest.fixture
def small_example_ledger():
return FavaLedger(data_file("example.beancount"))
@pytest.fixture
def example_ledger():
yield API
API.filter(account=None, filter=None, time=None)
@pytest.fixture
def budgets_doc(request):
entries, _, _ = load_string(request.function.__doc__, dedent=True)
budgets, _ = parse_budgets(entries)
return budgets
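# Illustrative sketch only (underscore-prefixed so pytest does not collect it):
# how the fixtures above are typically consumed in a test. The "title" option
# key is an assumption about the loaded example file.
def _example_usage(example_ledger, snapshot):
    # `example_ledger` is the shared FavaLedger for long-example.beancount;
    # `snapshot` compares pformat(data) against a stored snapshot file.
    snapshot(example_ledger.options["title"])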
| 23.072727
| 79
| 0.706462
|
3b6e2509420e0ebab88e0090c75786d644a74c39
| 2,978
|
py
|
Python
|
research/cv/DBPN/src/trainonestep/trainonestepgen.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/DBPN/src/trainonestep/trainonestepgen.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/DBPN/src/trainonestep/trainonestepgen.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TrainOnestepGen network"""
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import context
from mindspore.communication.management import get_group_size
from mindspore.context import ParallelMode
from mindspore.parallel._auto_parallel_context import auto_parallel_context
class TrainOnestepGen(nn.Cell):
"""TrainOnestepGen
Encapsulation class of DBPN network training.
    Append an optimizer to the training network, after which the construct
    function can be called to create the backward graph.
    Args:
        loss (Cell): Generator network wrapped with its loss function; the loss must already be included in this cell.
        optimizer (Cell): Optimizer for updating the weights.
sens (Number): The adjust parameter. Default: 1.0.
Outputs:
Tensor
"""
def __init__(self, loss, optimizer, sens=1.0):
super(TrainOnestepGen, self).__init__(auto_prefix=False)
self.optimizer = optimizer
self.loss = loss
self.loss.set_grad()
self.loss.set_train()
self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.weights = optimizer.parameters
self.reducer_flag = False
self.grad_reducer = None
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
self.reducer_flag = True
if self.reducer_flag:
mean = context.get_auto_parallel_context("gradients_mean")
if auto_parallel_context().get_device_num_is_set():
degree = context.get_auto_parallel_context("device_num")
else:
degree = get_group_size()
self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
def construct(self, HR_img, LR_img):
"""Defines the computation performed."""
weights = self.weights
content_loss = self.loss(HR_img, LR_img)
sens = ops.Fill()(ops.DType()(content_loss), ops.Shape()(content_loss), self.sens)
grads = self.grad(self.loss, weights)(HR_img, LR_img, sens)
if self.reducer_flag:
grads = self.grad_reducer(grads)
return ops.depend(content_loss, self.optimizer(grads))
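# Illustrative sketch only, not part of this module: minimal wiring of
# TrainOnestepGen with a stand-in "generator + loss" cell and an Adam
# optimizer. The toy Dense network and all hyper-parameters are placeholders
# for the real DBPN generator wrapped with its loss.
if __name__ == '__main__':
    import numpy as np
    from mindspore import Tensor
    class _ToyGenWithLoss(nn.Cell):
        """Placeholder cell returning a scalar loss for (HR, LR) batches."""
        def __init__(self):
            super(_ToyGenWithLoss, self).__init__()
            self.net = nn.Dense(8, 8)
            self.loss_fn = nn.MSELoss()
        def construct(self, hr_img, lr_img):
            return self.loss_fn(self.net(lr_img), hr_img)
    toy_loss = _ToyGenWithLoss()
    optim = nn.Adam(toy_loss.trainable_params(), learning_rate=1e-4)
    train_step = TrainOnestepGen(toy_loss, optim)
    hr = Tensor(np.random.rand(2, 8).astype(np.float32))
    lr = Tensor(np.random.rand(2, 8).astype(np.float32))
    print(train_step(hr, lr))  # content loss after one optimization step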
| 41.943662
| 95
| 0.688381
|
0c1ef7e8f3b1e387511c9834042df32bbe5295ff
| 427
|
py
|
Python
|
Modified Task-1/FileExtension.py
|
Indranil1211/Task-1
|
267f617a026d3032417634c83d9c60c22ecffd5f
|
[
"MIT"
] | null | null | null |
Modified Task-1/FileExtension.py
|
Indranil1211/Task-1
|
267f617a026d3032417634c83d9c60c22ecffd5f
|
[
"MIT"
] | null | null | null |
Modified Task-1/FileExtension.py
|
Indranil1211/Task-1
|
267f617a026d3032417634c83d9c60c22ecffd5f
|
[
"MIT"
] | null | null | null |
a=str(input("Input the filename: "))
if a[-5:]==".java":
print("The extension of the file is: 'Java' ")
elif a[-2:]==".c":
print("The extension of the file is: 'C' ")
elif a[-3:]==".cc" or a[-4:]==".cpp" or a[-4:]==".cxx" :
print("The extension of the file is: 'C++' ")
elif a[-3:]==".py":
print("The extension of the file is: 'Python' ")
else:
print("The extension of the file does not exist.")
| 35.583333
| 57
| 0.552693
|
1e84aeb441ccc4313efd1343ed57676a475c30d5
| 613
|
py
|
Python
|
supplies/routers.py
|
cupracer/family-tools
|
95a9f4d845fca4a00e2b666afc7eb791745121e7
|
[
"MIT"
] | null | null | null |
supplies/routers.py
|
cupracer/family-tools
|
95a9f4d845fca4a00e2b666afc7eb791745121e7
|
[
"MIT"
] | null | null | null |
supplies/routers.py
|
cupracer/family-tools
|
95a9f4d845fca4a00e2b666afc7eb791745121e7
|
[
"MIT"
] | null | null | null |
from rest_framework import routers
from django.urls import path, include
from .viewsets import CategoryViewSet, BrandViewSet, SupplyViewSet, UnitViewSet, SupplyItemViewSet, PackagingViewSet, \
ProductViewSet
router = routers.DefaultRouter()
router.register('categories', CategoryViewSet)
router.register('brands', BrandViewSet)
router.register('units', UnitViewSet)
router.register('packagings', PackagingViewSet)
router.register('supplies', SupplyViewSet)
router.register('products', ProductViewSet)
router.register('supply_items', SupplyItemViewSet)
urlpatterns = [
path('', include(router.urls)),
]
| 32.263158
| 119
| 0.800979
|
7730b58a909cd33bac81b588312fe729ed5a00ea
| 761
|
py
|
Python
|
src/stabilize_proc.py
|
msdev87/Cilialyzer
|
12da0936da6def42f074031a8c7a8260e91d26bd
|
[
"MIT"
] | 1
|
2021-12-11T10:26:22.000Z
|
2021-12-11T10:26:22.000Z
|
src/stabilize_proc.py
|
msdev87/Cilialyzer
|
12da0936da6def42f074031a8c7a8260e91d26bd
|
[
"MIT"
] | null | null | null |
src/stabilize_proc.py
|
msdev87/Cilialyzer
|
12da0936da6def42f074031a8c7a8260e91d26bd
|
[
"MIT"
] | 1
|
2022-01-05T07:05:37.000Z
|
2022-01-05T07:05:37.000Z
|
import numpy
from pystackreg import StackReg
def subproc(args):
# this function defines processes, which are ran in parallel
meanimg = args[0]
array = args[1]
nimgs = array.shape[0]
#print('nimgs: ',nimgs)
array_stabilized = numpy.copy(array)
sr = StackReg(StackReg.RIGID_BODY)
# loop over all images
for i in range(nimgs):
        # register every frame against the mean image; increase the modulus
        # (e.g. "i % 2") to register only every n-th frame for performance
        if ((i % 1) == 0):
            sr.register(meanimg, array[i,:,:])
        # transform the frame with the most recently computed registration
array_stabilized[i,:,:] = sr.transform(array[i,:,:])
#print(array_stabilized.shape[0])
return array_stabilized
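# Illustrative usage sketch (the variable names below are assumptions, not part of this module):
#   import numpy
#   stack = numpy.random.rand(10, 64, 64)          # (frames, height, width)
#   stabilized = subproc([stack.mean(axis=0), stack])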
| 25.366667 | 73 | 0.607096 |
4f160065441d0c811c962e3984f9caaa977f94de | 38,250 | py | Python | Python/Product/Analyzer/PythonScraper.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null | Python/Product/Analyzer/PythonScraper.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null | Python/Product/Analyzer/PythonScraper.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null |
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
"""
Generates information for supporting completion and analysis of Python code.
Outputs a pickled set of dictionaries. The dictionaries are in the format:
top-level: module_table
module_table:
{
'members': member_table,
'doc': doc_string,
}
type_table:
{
'mro' : type_list,
'bases' : type_list,
'members' : member_table,
'doc' : doc_string,
'is_hidden': bool,
'builtin': bool
}
member_table:
{ member_name : member_entry }
member_name: str
member_entry:
{
'kind': member_kind
'value': member_value
'version': version_spec # optional
}
member_kind: 'function' | 'funcref' | 'method' | 'property' | 'data' | 'type' | 'multiple' | 'typeref' | 'moduleref'
member_value: builtin_function | getset_descriptor | data | type_table | multiple_value | typeref | moduleref
moduleref:
{ 'module_name' : name }
typeref:
(
module name, # use '' to omit
type name,
type_list # index types; optional
)
funcref:
{
'func_name' : fully-qualified function name
}
multiple_value:
{ 'members' : (member_entry, ... ) }
builtin_function:
{
'doc': doc string,
'overloads': overload_table,
'builtin' : bool,
'static': bool,
}
overload_table:
[overload, ...]
overload:
{
'args': args_info,
'ret_type': type_list
}
args_info:
(arg_info, ...)
arg_info:
{
'type': type_list,
'name': argument name,
'default_value': repr of default value,
'arg_format' : ('*' | '**')
}
getset_descriptor:
{
'doc': doc string,
'type': type_list
}
data:
{ 'type': type_list }
type_list:
[type_ref, ...]
"""
try:
import cPickle as pickle
except ImportError:
import pickle # Py3k
import datetime
import os
import subprocess
import sys
import traceback
import types
# The version number should match the value of
# PythonTypeDatabase.CurrentVersion in
# \Release\Product\Python\Analysis\Interpreter\PythonTypeDatabase.cs
#
# To update the baseline DB, see Python\Product\PythonTools\RefreshDefaultDB.py
#
CURRENT_DATABASE_VERSION = '25'
# The values in KNOWN_METHOD_TYPES and KNOWN_FUNCTION_TYPES are used when
# detecting the types of members. The names are ('module.name', 'type.name')
# pairs, matching the contents of a typeref.
#
# If the type's name matches an item in KNOWN_METHOD_TYPES, the object is
# treated as a method descriptor.
#
# If the type's name matches an item in KNOWN_FUNCTION_TYPES, the object is
# treated as a method if defined on a type or a function otherwise.
KNOWN_METHOD_TYPES = frozenset([
    ('sip', 'methoddescriptor'),
])
KNOWN_FUNCTION_TYPES = frozenset([
    ('numpy', 'ufunc'),
])
# Safe member access methods because PyQt4 contains compiled types that crash
# on operations that should be completely safe, such as getattr() and dir().
# These should be used throughout when accessing members on type objects.
def safe_getattr(obj, attr, default):
try:
return getattr(obj, attr, default)
except:
return default
def safe_hasattr(obj, attr):
try:
return hasattr(obj, attr)
except:
return False
def safe_isinstance(obj, types):
try:
return isinstance(obj, types)
except:
return False
# safe_dir is imported from BuiltinScraper/IronPythonScraper
def safe_repr(obj):
try:
return repr(obj)
except:
return 'invalid object'
if sys.version_info[0] == 2:
builtin_name = '__builtin__'
else:
builtin_name = 'builtins'
TYPE_NAMES = {}
def types_to_typelist(iterable):
return [type_to_typeref(type) for type in iterable]
def type_to_typelist(type):
return [type_to_typeref(type)]
def typename_to_typeref(n1, n2=None):
'''If both n1 and n2 are specified, n1 is the module name and n2 is the type name.
If only n1 is specified, it is the type name.
'''
if n2 is None:
name = ('', n1)
elif n1 == '__builtin__':
name = (builtin_name, n2)
else:
name = (n1, n2)
return memoize_type_name(name)
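# For example (illustrative), typename_to_typeref('collections', 'OrderedDict') returns
# ('collections', 'OrderedDict'), typename_to_typeref('int') returns ('', 'int'), and a
# module name of '__builtin__' is normalized to the builtins name of the running interpreter.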
def type_to_typeref(type):
type_name = safe_getattr(type, '__name__', None)
if not type_name:
print('Cannot get type name of ' + safe_repr(type))
type = object
type_name = 'object'
if safe_hasattr(type, '__module__'):
if safe_getattr(type, '__module__', '') == '__builtin__':
name = (builtin_name, type_name)
else:
name = (type.__module__, type_name)
elif safe_isinstance(type, types.ModuleType):
name = (type_name, '')
else:
name = ('', type_name)
# memoize so when we pickle we can share refs
return memoize_type_name(name)
def memoize_type_name(name):
key = repr(name)
if key in TYPE_NAMES:
return TYPE_NAMES[key]
TYPE_NAMES[key] = name
return name
def maybe_to_typelist(type):
if isinstance(type, list) and len(type) > 0 and isinstance(type[0], tuple) and len(type[0]) > 1 and type[0][1]:
return type
elif isinstance(type, tuple) and len(type) > 1 and type[1]:
return [type]
else:
return type_to_typelist(type)
def generate_overload(ret_type, *args):
'''ret_type is either a type suitable for type_to_typelist, or it is the result of
one of the *_to_typelist() or *_to_typeref() functions.
Each arg is a tuple of ('name', type or type_ref or type_list, '' or '*' or '**', 'default value string')
The last two elements are optional, but the format is required if the default value
is specified.
'''
res = { 'ret_type': maybe_to_typelist(ret_type) }
arglist = []
for arg in args:
arglist.append({ 'name': arg[0], 'type': maybe_to_typelist(arg[1]) })
if len(arg) >= 3 and arg[2]:
arglist[-1]['arg_format'] = arg[2]
if len(arg) >= 4:
arglist[-1]['default_value'] = arg[3]
res['args'] = tuple(arglist)
return res
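# Example use (illustrative only), describing an overload that returns str and takes an
# optional int argument, roughly "center(width=0) -> str":
#
#   generate_overload(str, ('self', object), ('width', int, '', '0'))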
if sys.platform == "cli":
# provides extra type info when generating against IronPython which can be
# used w/ CPython completions
import IronPythonScraper as BuiltinScraper
else:
import BuiltinScraper
def generate_builtin_function(function, is_method=False):
function_table = {}
func_doc = safe_getattr(function, '__doc__', None)
if safe_isinstance(func_doc, str):
function_table['doc'] = func_doc
function_table['overloads'] = BuiltinScraper.get_overloads(function, is_method)
return function_table
def generate_getset_descriptor(descriptor):
descriptor_table = {}
desc_doc = safe_getattr(descriptor, '__doc__', None)
if safe_isinstance(desc_doc, str):
descriptor_table['doc'] = desc_doc
desc_type = BuiltinScraper.get_descriptor_type(descriptor)
descriptor_table['type'] = type_to_typelist(desc_type)
return descriptor_table
NoneType = type(None)
slot_wrapper_type = type(int.__add__)
method_descriptor_type = type(str.center)
member_descriptor_type = type(property.fdel)
try:
getset_descriptor_type = type(file.closed)
except NameError:
getset_descriptor_type = type(Exception.args) # Py3k, no file
class_method_descriptor_type = type(datetime.date.__dict__['today'])
class OldStyleClass: pass
OldStyleClassType = type(OldStyleClass)
def generate_member_table(obj, is_hidden=False, from_type=False, extra_types=None):
'''Generates a table of members of `obj`.
`is_hidden` determines whether all the members are hidden from IntelliSense.
`from_type` determines whether method descriptors are retained (True) or
ignored (False).
`extra_types` is a sequence of ``(type_name, object)`` pairs to add as types
to this table. These types are always hidden.
'''
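    # For example (illustrative), extra_types might be [('generator', types.GeneratorType)],
    # as generate_builtin_module() below passes; each such entry is added as a hidden type.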
sentinel = object()
members = []
for name in BuiltinScraper.safe_dir(obj):
member = safe_getattr(obj, name, sentinel)
if member is not sentinel:
members.append((name, member))
dependencies = {}
table = {}
if extra_types:
for name, member in extra_types:
member_kind, member_value = generate_member(member, is_hidden = True, from_type = from_type)
if member_kind == 'typeref':
actual_name = type_to_typeref(member)
if actual_name not in dependencies:
dependencies[actual_name] = member
table[name] = { 'kind': member_kind, 'value': member_value }
for name, member in members:
member_kind, member_value = generate_member(member, is_hidden, from_type)
if member_kind == 'typeref':
actual_name = type_to_typeref(member)
if actual_name not in dependencies:
dependencies[actual_name] = member
table[name] = { 'kind': member_kind, 'value': member_value }
if dependencies:
obj_mod, obj_name = type_to_typeref(obj)
def needs_type_info(other_mod, other_name):
if obj_mod != other_mod:
if other_mod == builtin_name:
# Never embed builtins (unless obj_mod is builtins, in
# which case the first comparison failed)
return False
# Always embed external types
return True
# We know obj_mod == other_mod at this point
if not obj_name:
# Writing ourselves in the expected place
return True
elif obj_name.startswith(other_name + '.'):
# Always write references to outer types
return False
elif other_name and other_name.startswith(obj_name + '.'):
# Always write type info for inner types
return True
# Otherwise, use a typeref
return False
for (dep_mod, dep_name), dep_obj in dependencies.items():
if needs_type_info(dep_mod, dep_name):
table[dep_name] = {
'kind': 'type',
'value': generate_type(dep_obj, is_hidden = dep_name not in table),
}
return table
def generate_member(obj, is_hidden=False, from_type=False):
try:
# Already handling all exceptions here, so don't worry about using the
# 'safe_*' functions.
if isinstance(obj, (types.BuiltinFunctionType, class_method_descriptor_type)):
return 'function', generate_builtin_function(obj)
elif isinstance(obj, types.FunctionType):
# PyPy - we see plain old Python functions in addition to built-ins
return 'method' if from_type else 'function', generate_builtin_function(obj, from_type)
elif isinstance(obj, (type, OldStyleClassType)):
return 'typeref', type_to_typelist(obj)
elif isinstance(obj, (types.BuiltinMethodType, slot_wrapper_type, method_descriptor_type)):
return 'method', generate_builtin_function(obj, True)
elif isinstance(obj, (getset_descriptor_type, member_descriptor_type)):
return 'property', generate_getset_descriptor(obj)
# Check whether we recognize the type name as one that does not respond
# correctly to isinstance checks.
type_name = type_to_typeref(type(obj))
if type_name in KNOWN_METHOD_TYPES:
return 'method', generate_builtin_function(obj, True)
if type_name in KNOWN_FUNCTION_TYPES:
return 'method' if from_type else 'function', generate_builtin_function(obj, from_type)
# Callable objects with a docstring that provides us with at least one
# overload will be treated as functions rather than data.
if hasattr(obj, '__call__'):
try:
info = generate_builtin_function(obj, from_type)
if info and info['overloads']:
return 'method' if from_type else 'function', info
except:
pass
except:
# Some compiled types fail here, so catch everything and treat the
# object as data.
traceback.print_exc()
print('Treating type as data')
# We don't have any special handling for this object type, so treat it as
# a constant.
return 'data', generate_data(obj)
if sys.version_info[0] >= 3:
str_types = (str, bytes)
else:
str_types = (str, unicode)
def generate_type_new(type_obj, obj):
if safe_isinstance(obj, (types.BuiltinFunctionType, class_method_descriptor_type)):
function_info = generate_builtin_function(obj)
new_overloads = BuiltinScraper.get_new_overloads(type_obj, obj)
if new_overloads is not None:
# replace overloads with better version if available
function_info['overloads'] = new_overloads
return 'function', function_info
if safe_getattr(obj, '__doc__', '') == 'T.__new__(S, ...) -> a new object with type S, a subtype of T':
doc_str = safe_getattr(type_obj, '__doc__', None)
if not safe_isinstance(doc_str, str_types):
doc_str = ''
return (
'function',
{
'doc': doc_str,
'overloads' : [{'doc': doc_str, 'args': [{'arg_format': '*', 'name': 'args'}] }]
}
)
return generate_member(obj)
def oldstyle_mro(type_obj, res):
type_bases = safe_getattr(type_obj, '__bases__', None)
if not type_bases:
return res
for base in type_bases:
if base not in res:
res.append(type_to_typeref(base))
for base in type_bases:
oldstyle_mro(base, res)
return res
def generate_type(type_obj, is_hidden=False):
type_table = {}
type_mro = safe_getattr(type_obj, '__mro__', None)
if type_mro:
type_table['mro'] = types_to_typelist(type_mro)
else:
type_table['mro'] = oldstyle_mro(type_obj, [])
type_bases = safe_getattr(type_obj, '__bases__', None)
if type_bases:
type_table['bases'] = types_to_typelist(type_bases)
type_doc = safe_getattr(type_obj, '__doc__', None)
if safe_isinstance(type_doc, str):
type_table['doc'] = type_doc
if is_hidden:
type_table['is_hidden'] = True
type_table['members'] = member_table = generate_member_table(type_obj)
if type_obj is object:
member_table['__new__'] = {
'kind' : 'function',
'value': { 'overloads': [generate_overload(object, ('cls', type))] }
}
elif '__new__' not in member_table:
member_table['__new__'] = generate_type_new(type_obj,
safe_getattr(type_obj, '__new__', object.__new__),)
if ('__getattribute__' in member_table and
type_obj is not object and
safe_isinstance(safe_getattr(type_obj, '__getattribute__', None), slot_wrapper_type)):
# skip __getattribute__ on types other than object if it's just a slot
# wrapper.
del member_table['__getattribute__']
return type_table
def generate_data(data_value):
data_table = {}
data_type = type(data_value)
data_table['type'] = type_to_typelist(data_type)
return data_table
def lookup_module(module_name):
try:
module = __import__(module_name)
except:
return None
if '.' in module_name:
for name in module_name.split('.')[1:]:
module = safe_getattr(module, name, None)
if not module:
module = sys.modules[module_name]
return module
def generate_module(module, extra_types=None):
module_table = {}
module_doc = safe_getattr(module, '__doc__', None)
if safe_isinstance(module_doc, str):
module_table['doc'] = module_doc
module_table['members'] = generate_member_table(module, extra_types = extra_types)
return module_table
def get_module_members(module):
"""returns an iterable which gives the names of the module which should be exposed"""
module_all = safe_getattr(module, '__all__', None)
if module_all:
return frozenset(module_all)
return BuiltinScraper.safe_dir(module)
def generate_builtin_module():
extra_types = {}
extra_types['object'] = type(object)
extra_types['function'] = types.FunctionType
extra_types['builtin_function'] = types.BuiltinFunctionType
extra_types['builtin_method_descriptor'] = types.BuiltinMethodType
extra_types['generator'] = types.GeneratorType
extra_types['NoneType'] = NoneType
extra_types['ellipsis'] = type(Ellipsis)
extra_types['module_type'] = types.ModuleType
if sys.version_info[0] == 2:
extra_types['dict_keys'] = type({}.iterkeys())
extra_types['dict_values'] = type({}.itervalues())
extra_types['dict_items'] = type({}.iteritems())
else:
extra_types['dict_keys'] = type({}.keys())
extra_types['dict_values'] = type({}.values())
extra_types['dict_items'] = type({}.items())
extra_types['list_iterator'] = type(iter(list()))
extra_types['tuple_iterator'] = type(iter(tuple()))
extra_types['set_iterator'] = type(iter(set()))
extra_types['str_iterator'] = type(iter(""))
if sys.version_info[0] == 2:
extra_types['bytes_iterator'] = type(iter(""))
extra_types['unicode_iterator'] = type(iter(unicode()))
else:
extra_types['bytes_iterator'] = type(iter(bytes()))
extra_types['unicode_iterator'] = type(iter(""))
extra_types['callable_iterator'] = type(iter(lambda: None, None))
res = generate_module(lookup_module(builtin_name), extra_types = extra_types.items())
if res and 'members' in res and 'object' in res['members']:
assert res['members']['object']['kind'] == 'type', "Unexpected: " + repr(res['members']['object'])
res['members']['object']['value']['doc'] = "The most base type"
return res
def merge_type(baseline_type, new_type):
if 'doc' not in new_type and 'doc' in baseline_type:
new_type['doc'] = baseline_type['doc']
merge_member_table(baseline_type['members'], new_type['members'])
return new_type
def merge_function(baseline_func, new_func):
new_func['overloads'].extend(baseline_func['overloads'])
return new_func
def merge_property(baseline_prop, new_prop):
new_prop['type'].extend(baseline_prop['type'])
return new_prop
def merge_data(baseline_data, new_data):
new_data['type'].extend(baseline_data['type'])
return new_data
def merge_method(baseline_method, new_method):
if baseline_method.get('overloads') is not None:
if new_method.get('overloads') is None:
new_method['overloads'] = baseline_method['overloads']
else:
new_method['overloads'].extend(baseline_method['overloads'])
if 'doc' in baseline_method and 'doc' not in new_method:
new_method['doc'] = baseline_method['doc']
#print 'new doc string'
return new_method
_MERGES = {'type' : merge_type,
'function': merge_method,
'property': merge_property,
'data': merge_data,
'method': merge_method}
def merge_member_table(baseline_table, new_table):
for name, member_table in new_table.items():
base_member_table = baseline_table.get(name, None)
kind = member_table['kind']
if base_member_table is not None and base_member_table['kind'] == kind:
merger = _MERGES.get(kind, None)
if merger is not None:
member_table['value'] = merger(base_member_table['value'], member_table['value'])
#else:
# print('unknown kind')
#elif base_member_table is not None:
# print('kinds differ', kind, base_member_table['kind'], name)
InitMethodEntry = {
'kind': 'method',
'value': {
'doc': 'x.__init__(...) initializes x; see help(type(x)) for signature',
'overloads': [generate_overload(NoneType, ('self', object), ('args', object, '*'), ('kwargs', object, '**'))]
}
}
NewMethodEntry = {
'kind': 'function',
'value': {
'doc': 'T.__new__(S, ...) -> a new object with type S, a subtype of T',
'overloads': [generate_overload(object, ('self', object), ('args', object, '*'), ('kwargs', object, '**'))]
}
}
ReprMethodEntry = {
'kind': 'method',
'value': {
'doc': 'x.__repr__() <==> repr(x)',
'overloads': [generate_overload(str, ('self', object))]
}
}
def _sre_post_fixer(mod):
if sys.platform == 'cli':
# IronPython doesn't have a real _sre module
return mod
mod['members']['compile'] = {
'kind': 'function',
'value': {
'overloads': [generate_overload(typename_to_typeref('_sre', 'SRE_Pattern'),
('pattern', object), ('flags', object), ('code', object), ('groups', object),
('groupindex', object), ('indexgroup', object))],
'builtin' : True,
'static': True,
}
}
mod['members']['SRE_Match'] = {
'kind': 'type',
'value': {
'bases': [(builtin_name, 'object')],
'doc': 'SRE_Match(m: Match, pattern: SRE_Pattern, text: str)\r\nRE_Match(m: Match, pattern: SRE_Pattern, text: str, pos: int, endpos: int)\r\n',
'members': {
'__new__': {
'kind': 'function',
'value': {
'doc': '__new__(cls: type, m: Match, pattern: SRE_Pattern, text: str)\r\n__new__(cls: type, m: Match, pattern: SRE_Pattern, text: str, pos: int, endpos: int)\r\n',
'overloads': None
}
},
'end': {
'kind': 'method',
'value': {
'doc': 'end(self: SRE_Match, group: object) -> int\r\nend(self: SRE_Match) -> int\r\n',
'overloads': [
generate_overload(int, ('self', typename_to_typeref('re', 'SRE_Match'))),
generate_overload(int, ('self', typename_to_typeref('re', 'SRE_Match')), ('group', object))
],
}
},
'endpos': {
'kind': 'property',
'value': {
'doc': 'Get: endpos(self: SRE_Match) -> int\r\n\r\n',
'type': type_to_typelist(int)
}
},
'expand': {
'kind': 'method',
'value': {
'doc': 'expand(self: SRE_Match, template: object) -> str\r\n',
'overloads': [generate_overload(str, ('self', typename_to_typeref('re', 'SRE_Match')), ('template', object))],
}
},
'group': {
'kind': 'method',
'value': {
'doc': 'group(self: SRE_Match) -> str\r\ngroup(self: SRE_Match, index: object) -> str\r\ngroup(self: SRE_Match, index: object, *additional: Array[object]) -> object\r\n',
'overloads': [
generate_overload(str, ('self', typename_to_typeref('re', 'SRE_Match'))),
generate_overload(str, ('self', typename_to_typeref('re', 'SRE_Match')), ('index', object)),
generate_overload(object, ('self', typename_to_typeref('re', 'SRE_Match')), ('index', object), ('additional', tuple, '*'))
],
},
},
'groupdict': {
'kind': 'method',
'value': {
'doc': 'groupdict(self: SRE_Match, value: object) -> dict (of str to object)\r\ngroupdict(self: SRE_Match, value: str) -> dict (of str to str)\r\ngroupdict(self: SRE_Match) -> dict (of str to str)\r\n',
'overloads': [
generate_overload(dict, ('self', typename_to_typeref('re', 'SRE_Match')), ('value', types_to_typelist([object, str]))),
generate_overload(dict, ('self', typename_to_typeref('re', 'SRE_Match')))
],
}
},
'groups': {
'kind': 'method',
'value': {
'doc': 'groups(self: SRE_Match, default: object) -> tuple\r\ngroups(self: SRE_Match) -> tuple (of str)\r\n',
'overloads': [
generate_overload(tuple, ('self', typename_to_typeref('re', 'SRE_Match')), ('default', object)),
generate_overload(tuple, ('self', typename_to_typeref('re', 'SRE_Match')))
],
}
},
'lastgroup': {
'kind': 'property',
'value': {
'doc': 'Get: lastgroup(self: SRE_Match) -> str\r\n\r\n',
'type': type_to_typelist(str)
}
},
'lastindex': {
'kind': 'property',
'value': {
'doc': 'Get: lastindex(self: SRE_Match) -> object\r\n\r\n',
'type': type_to_typelist(object)
}
},
'pos': {
'kind': 'property',
'value': {
'doc': 'Get: pos(self: SRE_Match) -> int\r\n\r\n',
'type': type_to_typelist(int)
}
},
're': {
'kind': 'property',
'value': {
'doc': 'Get: re(self: SRE_Match) -> SRE_Pattern\r\n\r\n',
'type': [typename_to_typeref('re', 'SRE_Pattern')]
}
},
'regs': {
'kind': 'property',
'value': {
'doc': 'Get: regs(self: SRE_Match) -> tuple\r\n\r\n',
'type': type_to_typelist(tuple)
}
},
'span': {
'kind': 'method',
'value': {
'doc': 'span(self: SRE_Match, group: object) -> tuple (of int)\r\nspan(self: SRE_Match) -> tuple (of int)\r\n',
'overloads': [
generate_overload(tuple, ('self', typename_to_typeref('re', 'SRE_Match'))),
generate_overload(tuple, ('self', typename_to_typeref('re', 'SRE_Match')), ('group', object))
]
}
},
'start': {
'kind': 'method',
'value': {
'doc': 'start(self: SRE_Match, group: object) -> int\r\nstart(self: SRE_Match) -> int\r\n',
'overloads': [
generate_overload(int, ('self', typename_to_typeref('re', 'SRE_Match'))),
generate_overload(int, ('self', typename_to_typeref('re', 'SRE_Match')), ('group', object))
]
}
},
'string': {
'kind': 'property',
'value': {
'doc': 'Get: string(self: SRE_Match) -> str\r\n\r\n',
'type': type_to_typelist(str)
}
}
},
'mro': [typename_to_typeref('re', 'SRE_Match'), type_to_typeref(object)]
}
}
mod['members']['SRE_Pattern'] = {
'kind': 'type',
'value': {'bases': [type_to_typeref(object)],
'doc': '',
'members': {
'__eq__': {
'kind': 'method',
'value': {
'doc': 'x.__eq__(y) <==> x==y',
'overloads': [generate_overload(bool, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('obj', object))],
}
},
'__ne__': {
'kind': 'method',
'value': {
'doc': '__ne__(x: object, y: object) -> bool\r\n',
'overloads': [generate_overload(bool, ('x', object), ('y', object))]
}
},
'__new__': NewMethodEntry,
'findall': {
'kind': 'method',
'value': {
'doc': 'findall(self: SRE_Pattern, string: object, pos: int, endpos: object) -> object\r\nfindall(self: SRE_Pattern, string: str, pos: int) -> object\r\nfindall(self: SRE_Pattern, string: str) -> object\r\n',
'overloads': [generate_overload(bool, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('string', str), ('pos', int, '', '0'), ('endpos', object, '', 'None'))]
}
},
'finditer': {
'kind': 'method',
'value': {
'doc': 'finditer(self: SRE_Pattern, string: object, pos: int, endpos: int) -> object\r\nfinditer(self: SRE_Pattern, string: object, pos: int) -> object\r\nfinditer(self: SRE_Pattern, string: object) -> object\r\n',
'overloads': [generate_overload(object, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('string', str), ('pos', int, '', '0'), ('endpos', int, '', 'None'))]
}
},
'flags': {
'kind': 'property',
'value': {
'doc': 'Get: flags(self: SRE_Pattern) -> int\r\n\r\n',
'type': type_to_typelist(int)
}
},
'groupindex': {
'kind': 'property',
'value': {
'doc': 'Get: groupindex(self: SRE_Pattern) -> dict\r\n\r\n',
'type': type_to_typelist(dict)
}
},
'groups': {
'kind': 'property',
'value': {
'doc': 'Get: groups(self: SRE_Pattern) -> int\r\n\r\n',
'type': type_to_typelist(int)
}
},
'match': {
'kind': 'method',
'value': {
'doc': 'match(self: SRE_Pattern, text: object, pos: int, endpos: int) -> RE_Match\r\nmatch(self: SRE_Pattern, text: object, pos: int) -> RE_Match\r\nmatch(self: SRE_Pattern, text: object) -> RE_Match\r\n',
'overloads': [generate_overload(object, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('text', str), ('pos', int, '', '0'), ('endpos', int, '', 'None'))],
}
},
'pattern': {
'kind': 'property',
'value': {
'doc': 'Get: pattern(self: SRE_Pattern) -> str\r\n\r\n',
'type': type_to_typelist(str)
}
},
'search': {
'kind': 'method',
'value': {
'doc': 'search(self: SRE_Pattern, text: object, pos: int, endpos: int) -> RE_Match\r\nsearch(self: SRE_Pattern,text: object, pos: int) -> RE_Match\r\nsearch(self: SRE_Pattern, text: object) -> RE_Match\r\n',
'overloads': [generate_overload(typename_to_typeref('_sre', 'RE_Match'), ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('text', str), ('pos', int, '', '0'), ('endpos', int, '', 'None'))]
}
},
'split': {
'kind': 'method',
'value': {
'doc': 'split(self: SRE_Pattern, string: object, maxsplit: int) -> list (of str)\r\nsplit(self: SRE_Pattern, string: str) -> list (of str)\r\n',
'overloads': [generate_overload(list, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('string', str), ('maxsplit', int, '', 'None'))]
}
},
'sub': {
'kind': 'method',
'value': {
'doc': 'sub(self: SRE_Pattern, repl: object, string: object, count: int) -> str\r\nsub(self: SRE_Pattern, repl: object, string: object) -> str\r\n',
'overloads': [generate_overload(str, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('repl', object), ('string', str), ('count', int, '', 'None'))]
}
},
'subn': {
'kind': 'method',
'value': {'doc': 'subn(self: SRE_Pattern, repl: object, string: object, count: int) -> object\r\nsubn(self: SRE_Pattern, repl: object, string: str) -> object\r\n',
'overloads': [generate_overload(object, ('self', typename_to_typeref('_sre', 'SRE_Pattern')), ('repl', object), ('string', str), ('count', int, '', 'None'))]
}
},
},
'mro': [typename_to_typeref('_sre', 'SRE_Pattern'),
type_to_typeref(object)]
}
}
return mod
# fixers which run on the newly generated file, not on the baseline file.
post_module_fixers = {
'_sre' : _sre_post_fixer,
}
def merge_with_baseline(mod_name, baselinepath, final):
baseline_file = os.path.join(baselinepath, mod_name + '.idb')
if os.path.exists(baseline_file):
print(baseline_file)
f = open(baseline_file, 'rb')
baseline = pickle.load(f)
f.close()
#import pprint
#pp = pprint.PrettyPrinter()
#pp.pprint(baseline['members'])
fixer = post_module_fixers.get(mod_name, None)
if fixer is not None:
final = fixer(final)
merge_member_table(baseline['members'], final['members'])
return final
def write_analysis(out_filename, analysis):
out_file = open(out_filename + '.idb', 'wb')
saved_analysis = pickle.dumps(analysis, 2)
if sys.platform == 'cli':
# work around strings always being unicode on IronPython, we fail to
# write back out here because IronPython doesn't like the non-ascii
# characters in the unicode string
import System
data = System.Array.CreateInstance(System.Byte, len(saved_analysis))
for i, v in enumerate(saved_analysis):
try:
data[i] = ord(v)
except:
pass
saved_analysis = data
out_file.write(saved_analysis)
out_file.close()
    # write a list of members which we can load to check for member existence
out_file = open(out_filename + '.idb.$memlist', 'wb')
for member in sorted(analysis['members']):
if sys.version_info >= (3, 0):
out_file.write((member + '\n').encode('utf8'))
else:
out_file.write(member + '\n')
out_file.close()
def write_module(mod_name, outpath, analysis):
write_analysis(os.path.join(outpath, mod_name), analysis)
if __name__ == "__main__":
outpath = sys.argv[1]
if len(sys.argv) > 2:
baselinepath = sys.argv[2]
else:
baselinepath = None
res = generate_builtin_module()
if not res:
raise RuntimeError("Unable to scrape builtins")
res = merge_with_baseline(builtin_name, baselinepath, res)
write_module(builtin_name, outpath, res)
for mod_name in sys.builtin_module_names:
if (mod_name == builtin_name or
mod_name == '__main__' or
not BuiltinScraper.should_include_module(mod_name)):
continue
res = generate_module(lookup_module(mod_name))
if res is not None:
try:
res = merge_with_baseline(mod_name, baselinepath, res)
write_module(mod_name, outpath, res)
except ValueError:
pass
| 38.09761 | 239 | 0.538614 |
03e7a7949d6709d84c61c7cd26f41770efd26756 | 517 | py | Python | plotly/validators/histogram2dcontour/contours/_size.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/histogram2dcontour/contours/_size.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | null | null | null | plotly/validators/histogram2dcontour/contours/_size.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='size',
parent_name='histogram2dcontour.contours',
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
implied_edits={'^autocontour': False},
min=0,
role='style',
**kwargs
)
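# Illustrative summary: this validator constrains histogram2dcontour.contours.size to a
# non-negative number and, via implied_edits, switches autocontour off when size is set.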
| 24.619048 | 66 | 0.582205 |
ceb0ccb6b7b0b0bde1c93ac1c1aa35e70713781d | 8,547 | py | Python | deeplabv3-water/train.py | juranccc/CVerrr | 37788d8bbd4f361474289f1d00e595e4f29b751e | ["MIT"] | null | null | null | deeplabv3-water/train.py | juranccc/CVerrr | 37788d8bbd4f361474289f1d00e595e4f29b751e | ["MIT"] | null | null | null | deeplabv3-water/train.py | juranccc/CVerrr | 37788d8bbd4f361474289f1d00e595e4f29b751e | ["MIT"] | null | null | null |
import os
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from nets.deeplabv3_plus import DeepLab
from nets.deeplabv3_training import weights_init
from utils.callbacks import LossHistory
from utils.dataloader import DeeplabDataset, deeplab_dataset_collate
from utils.utils_fit import fit_one_epoch
import torchvision.models as models
import torch.onnx
from torchsummary import summary
if __name__ == "__main__":
    #-------------------------------#
    #   Whether to use CUDA
    #   Set this to False if no GPU is available
    #-------------------------------#
    Cuda = True
    #-------------------------------#
    #   Must be changed when training on your own dataset:
    #   the number of classes you need + 1, e.g. 2 + 1
    #-------------------------------#
num_classes = 2
    #-------------------------------#
    #   Backbone network to use:
    #   mobilenet, xception
    #-------------------------------#
backbone = "xception"
    #-------------------------------------------------------------------------------------#
    #   See the README for the weight file (Baidu Netdisk download).
    #   Pretrained weights are necessary in 99% of cases; without them the weights are too
    #   random, feature extraction is weak and training results suffer. The pretrained
    #   weights are generic across datasets because the extracted features are generic.
    #------------------------------------------------------------------------------------#
model_path = "model_data/ep148-loss0.038-val_loss0.031.pth"
    #-------------------------------#
    #   Downsampling factor: 8 or 16
    #   8 requires more GPU memory
    #-------------------------------#
    downsample_factor = 16
    #------------------------------#
    #   Input image size
    #------------------------------#
input_shape = [512, 512]
    #----------------------------------------------------#
    #   Training is split into two stages: a frozen stage and an unfrozen stage.
    #   Frozen-stage parameters:
    #   the backbone of the model is frozen and the feature-extraction network
    #   does not change; this needs little GPU memory and only fine-tunes the rest.
    #----------------------------------------------------#
Init_Epoch = 0
Freeze_Epoch = 50
Freeze_batch_size = 8
Freeze_lr = 5e-4
    #----------------------------------------------------#
    #   Unfrozen-stage parameters:
    #   the backbone is no longer frozen and the feature-extraction network changes;
    #   this needs more GPU memory and all parameters of the network are updated.
    #----------------------------------------------------#
UnFreeze_Epoch = 100
Unfreeze_batch_size = 4
Unfreeze_lr = 5e-5
    #------------------------------#
    #   Dataset path
    #------------------------------#
VOCdevkit_path = 'VOCdevkit'
    #--------------------------------------------------------------------#
    #   Suggested setting:
    #   with few classes (a handful), set this to True
    #   with many classes (a dozen or more) and a large batch_size (above 10), set this to True
    #   with many classes (a dozen or more) and a small batch_size (below 10), set this to False
    #---------------------------------------------------------------------#
dice_loss = False
    #--------------------------------------------------------------------------------------------#
    #   Pretrained weights for the backbone: the weights here cover only the backbone,
    #   while model_path below covers the whole model.
    #   To start training from the backbone only, set pretrained=True here and comment out
    #   the model_path section below.
    #--------------------------------------------------------------------------------------------#
pretrained = False
    #------------------------------------------------------#
    #   Whether to use freeze training: by default the backbone is trained frozen first
    #   and then unfrozen.
    #------------------------------------------------------#
Freeze_Train = True
    #------------------------------------------------------#
    #   Whether to use multiple workers to read the data.
    #   Enabling this speeds up data loading but uses more memory;
    #   on machines with little memory, set it to 2 or 0.
    #------------------------------------------------------#
num_workers = 4
model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained)
if not pretrained:
weights_init(model)
    #------------------------------------------------------#
    #   See the README for the weight file (Baidu Netdisk download).
    #------------------------------------------------------#
print('Load weights {}.'.format(model_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path, map_location = device)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
model_train = model.train()
batch_size = 1
dummy_input = torch.randn(batch_size, 3, 512, 512)
torch.onnx.export(model, dummy_input, 'water_seg_torch.onnx',verbose=False,opset_version=11)
if Cuda:
model_train = torch.nn.DataParallel(model)
cudnn.benchmark = True
model_train = model_train.cuda()
loss_history = LossHistory("logs/")
    #---------------------------#
    #   Read the dataset split txt files
    #---------------------------#
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f:
train_lines = f.readlines()
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f:
val_lines = f.readlines()
    #------------------------------------------------------#
    #   The backbone features are generic, so freeze training speeds up training
    #   and also prevents the weights from being destroyed early in training.
    #   Init_Epoch is the starting epoch,
    #   Freeze_Epoch is the last epoch of freeze training,
    #   UnFreeze_Epoch is the total number of training epochs.
    #   If you see OOM or run out of GPU memory, reduce the batch size.
    #------------------------------------------------------#
if True:
batch_size = Freeze_batch_size
lr = Freeze_lr
start_epoch = Init_Epoch
end_epoch = Freeze_Epoch
optimizer = optim.Adam(model_train.parameters(), lr, weight_decay = 5e-4)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.92)
train_dataset = DeeplabDataset(train_lines, input_shape, num_classes, True, VOCdevkit_path)
val_dataset = DeeplabDataset(val_lines, input_shape, num_classes, False, VOCdevkit_path)
gen = DataLoader(train_dataset, shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last = True, collate_fn = deeplab_dataset_collate)
gen_val = DataLoader(val_dataset , shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last = True, collate_fn = deeplab_dataset_collate)
epoch_step = len(train_lines) // batch_size
epoch_step_val = len(val_lines) // batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("The dataset is too small to train on; please add more data.")
        #------------------------------------#
        #   Freeze part of the network for training
        #------------------------------------#
if Freeze_Train:
for param in model.backbone.parameters():
param.requires_grad = False
for epoch in range(start_epoch, end_epoch):
fit_one_epoch(model_train, model, loss_history, optimizer, epoch,
epoch_step, epoch_step_val, gen, gen_val, end_epoch, Cuda, dice_loss, num_classes)
lr_scheduler.step()
if True:
batch_size = Unfreeze_batch_size
lr = Unfreeze_lr
start_epoch = Freeze_Epoch
end_epoch = UnFreeze_Epoch
optimizer = optim.Adam(model_train.parameters(), lr, weight_decay = 5e-4)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.92)
train_dataset = DeeplabDataset(train_lines, input_shape, num_classes, True, VOCdevkit_path)
val_dataset = DeeplabDataset(val_lines, input_shape, num_classes, False, VOCdevkit_path)
gen = DataLoader(train_dataset, shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last = True, collate_fn = deeplab_dataset_collate)
gen_val = DataLoader(val_dataset , shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last = True, collate_fn = deeplab_dataset_collate)
epoch_step = len(train_lines) // batch_size
epoch_step_val = len(val_lines) // batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("The dataset is too small to train on; please add more data.")
if Freeze_Train:
for param in model.backbone.parameters():
param.requires_grad = True
for epoch in range(start_epoch,end_epoch):
fit_one_epoch(model_train, model, loss_history, optimizer, epoch,
epoch_step, epoch_step_val, gen, gen_val, end_epoch, Cuda, dice_loss, num_classes)
lr_scheduler.step()
| 41.289855 | 137 | 0.509068 |
705e4dfd2c7dc1927662f14af668bde836621e03 | 70,906 | py | Python | silicoin/full_node/weight_proof.py | zcomputerwiz/silicoin-light-wallet | 1cdc3784effec229cc841a04655078b1d9913d33 | ["Apache-2.0"] | null | null | null | silicoin/full_node/weight_proof.py | zcomputerwiz/silicoin-light-wallet | 1cdc3784effec229cc841a04655078b1d9913d33 | ["Apache-2.0"] | null | null | null | silicoin/full_node/weight_proof.py | zcomputerwiz/silicoin-light-wallet | 1cdc3784effec229cc841a04655078b1d9913d33 | ["Apache-2.0"] | null | null | null |
import asyncio
import dataclasses
import logging
import math
import pathlib
import random
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Tuple
from silicoin.consensus.block_header_validation import validate_finished_header_block
from silicoin.consensus.block_record import BlockRecord
from silicoin.consensus.blockchain_interface import BlockchainInterface
from silicoin.consensus.constants import ConsensusConstants
from silicoin.consensus.deficit import calculate_deficit
from silicoin.consensus.full_block_to_block_record import header_block_to_sub_block_record
from silicoin.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from silicoin.consensus.vdf_info_computation import get_signage_point_vdf_info
from silicoin.types.blockchain_format.classgroup import ClassgroupElement
from silicoin.types.blockchain_format.sized_bytes import bytes32
from silicoin.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
from silicoin.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from silicoin.types.blockchain_format.vdf import VDFInfo, VDFProof
from silicoin.types.end_of_slot_bundle import EndOfSubSlotBundle
from silicoin.types.header_block import HeaderBlock
from silicoin.types.weight_proof import (
SubEpochChallengeSegment,
SubEpochData,
SubSlotData,
WeightProof,
SubEpochSegments,
RecentChainData,
)
from silicoin.util.block_cache import BlockCache
from silicoin.util.hash import std_hash
from silicoin.util.ints import uint8, uint32, uint64, uint128
from silicoin.util.streamable import dataclass_from_dict, recurse_jsonify
log = logging.getLogger(__name__)
class WeightProofHandler:
LAMBDA_L = 100
C = 0.5
MAX_SAMPLES = 20
def __init__(
self,
constants: ConsensusConstants,
blockchain: BlockchainInterface,
):
self.tip: Optional[bytes32] = None
self.proof: Optional[WeightProof] = None
self.constants = constants
self.blockchain = blockchain
self.lock = asyncio.Lock()
self._num_processes = 4
async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("unknown tip")
return None
if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
log.debug("chain to short for weight proof")
return None
async with self.lock:
if self.proof is not None:
if self.proof.recent_chain_data[-1].header_hash == tip:
return self.proof
wp = await self._create_proof_of_weight(tip)
if wp is None:
return None
self.proof = wp
self.tip = tip
return wp
def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]:
sub_epoch_data: List[SubEpochData] = []
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_height:
break
ses = self.blockchain.get_ses(ses_height)
log.debug(f"handle sub epoch summary {sub_epoch_n} at height: {ses_height} ses {ses}")
sub_epoch_data.append(_create_sub_epoch_data(ses))
return sub_epoch_data
async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]:
"""
Creates a weight proof object
"""
assert self.blockchain is not None
sub_epoch_segments: List[SubEpochChallengeSegment] = []
tip_rec = self.blockchain.try_block_record(tip)
if tip_rec is None:
log.error("failed not tip in cache")
return None
log.info(f"create weight proof peak {tip} {tip_rec.height}")
recent_chain = await self._get_recent_chain(tip_rec.height)
if recent_chain is None:
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights)
# use second to last ses as seed
seed = self.get_seed_for_proof(summary_heights, tip_rec.height)
rng = random.Random(seed)
weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain)
sample_n = 0
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
if ses_height > tip_rec.height:
break
            # if we have enough sub_epoch samples, don't sample
if sample_n >= self.MAX_SAMPLES:
log.debug("reached sampled sub epoch cap")
break
# sample sub epoch
# next sub block
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check): # type: ignore
sample_n += 1
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(
f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} "
)
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
log.debug(f"sub epoch {sub_epoch_n} has {len(segments)} segments")
sub_epoch_segments.extend(segments)
prev_ses_block = ses_block
log.debug(f"sub_epochs: {len(sub_epoch_data)}")
return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32:
count = 0
ses = None
for sub_epoch_n, ses_height in enumerate(reversed(summary_heights)):
if ses_height <= tip_height:
count += 1
if count == 2:
ses = self.blockchain.get_ses(ses_height)
break
assert ses is not None
seed = ses.get_hash()
return seed
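        # For example (illustrative), with summary_heights [100, 200, 300] and tip_height 250,
        # the summaries at heights 200 and 100 are counted and the seed is taken from the
        # summary at height 100, i.e. the second to last sub epoch summary below the tip.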
async def _get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]:
recent_chain: List[HeaderBlock] = []
ses_heights = self.blockchain.get_ses_heights()
min_height = 0
count_ses = 0
for ses_height in reversed(ses_heights):
if ses_height <= tip_height:
count_ses += 1
if count_ses == 2:
min_height = ses_height - 1
break
log.debug(f"start {min_height} end {tip_height}")
headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height, tx_filter=False)
blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height)
ses_count = 0
curr_height = tip_height
blocks_n = 0
while ses_count < 2:
if curr_height == 0:
break
# add to needed reward chain recent blocks
header_block = headers[self.blockchain.height_to_hash(curr_height)]
block_rec = blocks[header_block.header_hash]
if header_block is None:
log.error("creating recent chain failed")
return None
recent_chain.insert(0, header_block)
if block_rec.sub_epoch_summary_included:
ses_count += 1
curr_height = uint32(curr_height - 1) # type: ignore
blocks_n += 1
header_block = headers[self.blockchain.height_to_hash(curr_height)]
recent_chain.insert(0, header_block)
log.info(
f"recent chain, "
f"start: {recent_chain[0].reward_chain_block.height} "
f"end: {recent_chain[-1].reward_chain_block.height} "
)
return recent_chain
async def create_prev_sub_epoch_segments(self):
log.debug("create prev sub_epoch_segments")
heights = self.blockchain.get_ses_heights()
if len(heights) < 3:
return None
count = len(heights) - 2
ses_sub_block = self.blockchain.height_to_block_record(heights[-2])
prev_ses_sub_block = self.blockchain.height_to_block_record(heights[-3])
assert prev_ses_sub_block.sub_epoch_summary_included is not None
segments = await self.__create_sub_epoch_segments(ses_sub_block, prev_ses_sub_block, uint32(count))
assert segments is not None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_sub_block.header_hash, segments)
log.debug("sub_epoch_segments done")
return None
async def create_sub_epoch_segments(self):
log.debug("check segments in db")
"""
Creates a weight proof object
"""
assert self.blockchain is not None
peak_height = self.blockchain.get_peak_height()
if peak_height is None:
log.error("no peak yet")
return None
summary_heights = self.blockchain.get_ses_heights()
prev_ses_block = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(uint32(0)))
if prev_ses_block is None:
return None
ses_blocks = await self.blockchain.get_block_records_at(summary_heights)
if ses_blocks is None:
return None
for sub_epoch_n, ses_height in enumerate(summary_heights):
log.debug(f"check db for sub epoch {sub_epoch_n}")
if ses_height > peak_height:
break
ses_block = ses_blocks[sub_epoch_n]
if ses_block is None or ses_block.sub_epoch_summary_included is None:
log.error("error while building proof")
return None
await self.__create_persist_segment(prev_ses_block, ses_block, ses_height, sub_epoch_n)
prev_ses_block = ses_block
await asyncio.sleep(2)
log.debug("done checking segments")
return None
async def __create_persist_segment(self, prev_ses_block, ses_block, ses_height, sub_epoch_n):
segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash)
if segments is None:
segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n))
if segments is None:
log.error(f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} ")
return None
await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments)
async def __create_sub_epoch_segments(
self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32
) -> Optional[List[SubEpochChallengeSegment]]:
segments: List[SubEpochChallengeSegment] = []
start_height = await self.get_prev_two_slots_height(se_start)
blocks = await self.blockchain.get_block_records_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS
)
header_blocks = await self.blockchain.get_header_blocks_in_range(
start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS, tx_filter=False
)
curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash]
height = se_start.height
assert curr is not None
first = True
idx = 0
while curr.height < ses_block.height:
if blocks[curr.header_hash].is_challenge_block(self.constants):
log.debug(f"challenge segment {idx}, starts at {curr.height} ")
seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first)
if seg is None:
log.error(f"failed creating segment {curr.header_hash} ")
return None
segments.append(seg)
idx += 1
first = False
else:
height = height + uint32(1) # type: ignore
curr = header_blocks[self.blockchain.height_to_hash(height)]
if curr is None:
return None
log.debug(f"next sub epoch starts at {height}")
return segments
async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32:
# find prev 2 slots height
slot = 0
batch_size = 50
curr_rec = se_start
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
while slot < 2 and curr_rec.height > 0:
if curr_rec.first_in_sub_slot:
slot += 1
if end - curr_rec.height == batch_size - 1:
blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height)
end = curr_rec.height
curr_rec = blocks[self.blockchain.height_to_hash(uint32(curr_rec.height - 1))]
return curr_rec.height
async def _create_challenge_segment(
self,
header_block: HeaderBlock,
sub_epoch_n: uint32,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_segment_in_sub_epoch: bool,
) -> Tuple[Optional[SubEpochChallengeSegment], uint32]:
assert self.blockchain is not None
sub_slots: List[SubSlotData] = []
log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ")
# VDFs from sub slots before challenge block
first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs(
header_block, header_blocks, blocks, first_segment_in_sub_epoch
)
if first_sub_slots is None:
log.error("failed building first sub slots")
return None, uint32(0)
sub_slots.extend(first_sub_slots)
ssd = await _challenge_block_vdfs(
self.constants,
header_block,
blocks[header_block.header_hash],
blocks,
)
sub_slots.append(ssd)
        # VDFs from slot after challenge block to end of slot
log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ")
challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf(
uint32(header_block.height + 1), header_blocks, blocks
)
if challenge_slot_end_sub_slots is None:
log.error("failed building slot end ")
return None, uint32(0)
sub_slots.extend(challenge_slot_end_sub_slots)
if first_segment_in_sub_epoch and sub_epoch_n != 0:
return (
SubEpochChallengeSegment(sub_epoch_n, sub_slots, first_rc_end_of_slot_vdf),
end_height,
)
return SubEpochChallengeSegment(sub_epoch_n, sub_slots, None), end_height
# returns a challenge chain vdf from slot start to signage point
async def __first_sub_slot_vdfs(
self,
header_block: HeaderBlock,
header_blocks: Dict[bytes32, HeaderBlock],
blocks: Dict[bytes32, BlockRecord],
first_in_sub_epoch: bool,
) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]:
# combine cc vdfs of all reward blocks from the start of the sub slot to end
header_block_sub_rec = blocks[header_block.header_hash]
# find slot start
curr_sub_rec = header_block_sub_rec
first_rc_end_of_slot_vdf = None
if first_in_sub_epoch and curr_sub_rec.height > 0:
while not curr_sub_rec.sub_epoch_summary_included:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks)
else:
if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot:
sub_slots_num = 2
while sub_slots_num > 0 and curr_sub_rec.height > 0:
if curr_sub_rec.first_in_sub_slot:
assert curr_sub_rec.finished_challenge_slot_hashes is not None
sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes)
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
else:
while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0:
curr_sub_rec = blocks[curr_sub_rec.prev_hash]
curr = header_blocks[curr_sub_rec.header_hash]
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while curr.height < header_block.height:
if curr is None:
log.error("failed fetching block")
return None, None
if curr.first_in_sub_slot:
# if not blue boxed
if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]):
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(curr.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
tmp_sub_slots_data = []
ssd = SubSlotData(
None,
None,
None,
None,
None,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
curr.reward_chain_block.infused_challenge_chain_ip_vdf,
curr.total_iters,
)
tmp_sub_slots_data.append(ssd)
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
for idx, sub_slot in enumerate(header_block.finished_sub_slots):
curr_icc_info = None
if sub_slot.infused_challenge_chain is not None:
curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info))
return sub_slots_data, first_rc_end_of_slot_vdf
def first_rc_end_of_slot_vdf(
self,
header_block,
blocks: Dict[bytes32, BlockRecord],
header_blocks: Dict[bytes32, HeaderBlock],
) -> Optional[VDFInfo]:
curr = blocks[header_block.header_hash]
while curr.height > 0 and not curr.sub_epoch_summary_included:
curr = blocks[curr.prev_hash]
return header_blocks[curr.header_hash].finished_sub_slots[-1].reward_chain.end_of_slot_vdf
async def __slot_end_vdf(
self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord]
) -> Tuple[Optional[List[SubSlotData]], uint32]:
# gets all vdfs first sub slot after challenge block to last sub slot
log.debug(f"slot end vdf start height {start_height}")
curr = header_blocks[self.blockchain.height_to_hash(start_height)]
curr_header_hash = curr.header_hash
sub_slots_data: List[SubSlotData] = []
tmp_sub_slots_data: List[SubSlotData] = []
while not blocks[curr_header_hash].is_challenge_block(self.constants):
if curr.first_in_sub_slot:
sub_slots_data.extend(tmp_sub_slots_data)
curr_prev_header_hash = curr.prev_header_hash
# add collected vdfs
for idx, sub_slot in enumerate(curr.finished_sub_slots):
prev_rec = blocks[curr_prev_header_hash]
eos_vdf_iters = prev_rec.sub_slot_iters
if idx == 0:
eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants))
sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters))
tmp_sub_slots_data = []
tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks))
curr = header_blocks[self.blockchain.height_to_hash(uint32(curr.height + 1))]
curr_header_hash = curr.header_hash
if len(tmp_sub_slots_data) > 0:
sub_slots_data.extend(tmp_sub_slots_data)
log.debug(f"slot end vdf end height {curr.height} slots {len(sub_slots_data)} ")
return sub_slots_data, curr.height
def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]):
cc_sp_proof = None
icc_ip_proof = None
cc_sp_info = None
icc_ip_info = None
block_record = blocks[curr.header_hash]
if curr.infused_challenge_chain_ip_proof is not None:
assert curr.reward_chain_block.infused_challenge_chain_ip_vdf
icc_ip_proof = curr.infused_challenge_chain_ip_proof
icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf
if curr.challenge_chain_sp_proof is not None:
assert curr.reward_chain_block.challenge_chain_sp_vdf
cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf
if not curr.challenge_chain_sp_proof.normalized_to_identity:
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
self.constants,
curr.finished_sub_slots,
block_record.overflow,
None if curr.height == 0 else blocks[curr.prev_header_hash],
BlockCache(blocks),
block_record.sp_total_iters(self.constants),
block_record.sp_iters(self.constants),
)
cc_sp_vdf_info = VDFInfo(
curr.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
curr.reward_chain_block.challenge_chain_sp_vdf.output,
)
cc_sp_proof = curr.challenge_chain_sp_proof
cc_sp_info = cc_sp_vdf_info
return SubSlotData(
None,
cc_sp_proof,
curr.challenge_chain_ip_proof,
icc_ip_proof,
cc_sp_info,
curr.reward_chain_block.signage_point_index,
None,
None,
None,
None,
curr.reward_chain_block.challenge_chain_ip_vdf,
icc_ip_info,
curr.total_iters,
)
def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed sub epoch data validation")
return False, uint32(0)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
log.info("validate sub epoch challenge segments")
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0)
if not _validate_sub_epoch_segments(constants, rng, wp_segment_bytes, summary_bytes):
return False, uint32(0)
log.info("validate weight proof recent blocks")
if not _validate_recent_blocks(constants, wp_recent_chain_bytes, summary_bytes):
return False, uint32(0)
return True, self.get_fork_point(summaries)
def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]:
log.debug("get fork point skip validations")
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0)
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.warning("weight proof failed to validate sub epoch summaries")
return False, uint32(0)
return True, self.get_fork_point(summaries)
async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32, List[SubEpochSummary]]:
assert self.blockchain is not None
assert len(weight_proof.sub_epochs) > 0
if len(weight_proof.sub_epochs) == 0:
return False, uint32(0), []
peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height
log.info(f"validate weight proof peak height {peak_height}")
summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof)
if summaries is None:
log.error("weight proof failed sub epoch data validation")
return False, uint32(0), []
seed = summaries[-2].get_hash()
rng = random.Random(seed)
if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
log.error("failed weight proof sub epoch sample validation")
return False, uint32(0), []
executor = ProcessPoolExecutor(4)
constants, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(
self.constants, summaries, weight_proof
)
recent_blocks_validation_task = asyncio.get_running_loop().run_in_executor(
executor, _validate_recent_blocks, constants, wp_recent_chain_bytes, summary_bytes
)
segments_validated, vdfs_to_validate = _validate_sub_epoch_segments(
constants, rng, wp_segment_bytes, summary_bytes
)
if not segments_validated:
return False, uint32(0), []
vdf_chunks = chunks(vdfs_to_validate, self._num_processes)
vdf_tasks = []
for chunk in vdf_chunks:
byte_chunks = []
for vdf_proof, classgroup, vdf_info in chunk:
byte_chunks.append((bytes(vdf_proof), bytes(classgroup), bytes(vdf_info)))
vdf_task = asyncio.get_running_loop().run_in_executor(executor, _validate_vdf_batch, constants, byte_chunks)
vdf_tasks.append(vdf_task)
for vdf_task in vdf_tasks:
validated = await vdf_task
if not validated:
return False, uint32(0), []
valid_recent_blocks_task = recent_blocks_validation_task
valid_recent_blocks = await valid_recent_blocks_task
if not valid_recent_blocks:
log.error("failed validating weight proof recent blocks")
return False, uint32(0), []
return True, self.get_fork_point(summaries), summaries
def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> uint32:
# iterate through sub epoch summaries to find fork point
fork_point_index = 0
ses_heights = self.blockchain.get_ses_heights()
for idx, summary_height in enumerate(ses_heights):
log.debug(f"check summary {idx} height {summary_height}")
local_ses = self.blockchain.get_ses(summary_height)
if idx == len(received_summaries) - 1:
# end of wp summaries, local chain is longer or equal to wp chain
break
if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash():
break
fork_point_index = idx
if fork_point_index > 2:
            # Two summaries can have different blocks and still be identical
# This gets resolved after one full sub epoch
height = ses_heights[fork_point_index - 2]
else:
height = uint32(0)
return height
def _get_weights_for_sampling(
rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock]
) -> Optional[List[uint128]]:
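    """
    Summary of the logic below: return a sorted list of cumulative-weight values at which sub epochs
    will be sampled. The number of sampling queries is derived from the fraction of total weight
    covered by the recent chain; returns None when the computed success probability is non-positive,
    in which case callers treat every sub epoch as sampled.
    """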
weight_to_check = []
last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight
delta = last_l_weight / total_weight
prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta)
if prob_of_adv_succeeding <= 0:
return None
queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding)
for i in range(int(queries) + 1):
u = rng.random()
q = 1 - delta ** u
# todo check division and type conversions
weight = q * float(total_weight)
weight_to_check.append(uint128(int(weight)))
weight_to_check.sort()
return weight_to_check
def _sample_sub_epoch(
start_of_epoch_weight: uint128,
end_of_epoch_weight: uint128,
weight_to_check: List[uint128],
) -> bool:
"""
weight_to_check: List[uint128] is expected to be sorted
"""
if weight_to_check is None:
return True
if weight_to_check[-1] < start_of_epoch_weight:
return False
if weight_to_check[0] > end_of_epoch_weight:
return False
choose = False
for weight in weight_to_check:
if weight > end_of_epoch_weight:
return False
if start_of_epoch_weight < weight < end_of_epoch_weight:
log.debug(f"start weight: {start_of_epoch_weight}")
log.debug(f"weight to check {weight}")
log.debug(f"end weight: {end_of_epoch_weight}")
choose = True
break
return choose
# wp creation methods
def _create_sub_epoch_data(
sub_epoch_summary: SubEpochSummary,
) -> SubEpochData:
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
# Number of subblocks overflow in previous slot
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
# New work difficulty and iterations per sub-slot
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)
async def _challenge_block_vdfs(
constants: ConsensusConstants,
header_block: HeaderBlock,
block_rec: BlockRecord,
sub_blocks: Dict[bytes32, BlockRecord],
):
(_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info(
constants,
header_block.finished_sub_slots,
block_rec.overflow,
None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash],
BlockCache(sub_blocks),
block_rec.sp_total_iters(constants),
block_rec.sp_iters(constants),
)
cc_sp_info = None
if header_block.reward_chain_block.challenge_chain_sp_vdf:
cc_sp_info = header_block.reward_chain_block.challenge_chain_sp_vdf
assert header_block.challenge_chain_sp_proof
if not header_block.challenge_chain_sp_proof.normalized_to_identity:
cc_sp_info = VDFInfo(
header_block.reward_chain_block.challenge_chain_sp_vdf.challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
ssd = SubSlotData(
header_block.reward_chain_block.proof_of_space,
header_block.challenge_chain_sp_proof,
header_block.challenge_chain_ip_proof,
None,
cc_sp_info,
header_block.reward_chain_block.signage_point_index,
None,
None,
None,
None,
header_block.reward_chain_block.challenge_chain_ip_vdf,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
block_rec.total_iters,
)
return ssd
def handle_finished_slots(end_of_slot: EndOfSubSlotBundle, icc_end_of_slot_info):
return SubSlotData(
None,
None,
None,
None,
None,
None,
None
if end_of_slot.proofs.challenge_chain_slot_proof is None
else end_of_slot.proofs.challenge_chain_slot_proof,
None
if end_of_slot.proofs.infused_challenge_chain_slot_proof is None
else end_of_slot.proofs.infused_challenge_chain_slot_proof,
end_of_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
icc_end_of_slot_info,
None,
None,
None,
)
def handle_end_of_slot(
sub_slot: EndOfSubSlotBundle,
eos_vdf_iters: uint64,
):
assert sub_slot.infused_challenge_chain
assert sub_slot.proofs.infused_challenge_chain_slot_proof
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf
else:
icc_info = VDFInfo(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
cc_info = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf
else:
cc_info = VDFInfo(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
return SubSlotData(
None,
None,
None,
None,
None,
None,
sub_slot.proofs.challenge_chain_slot_proof,
sub_slot.proofs.infused_challenge_chain_slot_proof,
cc_info,
icc_info,
None,
None,
None,
)
def chunks(some_list, chunk_size):
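    """
    Split ``some_list`` into consecutive chunks of at most ``chunk_size`` items (chunk_size is clamped to >= 1).
    Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]].
    """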
chunk_size = max(1, chunk_size)
return (some_list[i : i + chunk_size] for i in range(0, len(some_list), chunk_size))
def compress_segments(full_segment_index, segments: List[SubEpochChallengeSegment]) -> List[SubEpochChallengeSegment]:
compressed_segments = []
compressed_segments.append(segments[0])
for idx, segment in enumerate(segments[1:]):
if idx != full_segment_index:
# remove all redundant values
segment = compress_segment(segment)
compressed_segments.append(segment)
return compressed_segments
def compress_segment(segment: SubEpochChallengeSegment) -> SubEpochChallengeSegment:
# find challenge slot
comp_seg = SubEpochChallengeSegment(segment.sub_epoch_n, [], segment.rc_slot_end_info)
for slot in segment.sub_slots:
comp_seg.sub_slots.append(slot)
if slot.is_challenge():
break
    return comp_seg
# wp validation methods
def _validate_sub_epoch_summaries(
constants: ConsensusConstants,
weight_proof: WeightProof,
) -> Tuple[Optional[List[SubEpochSummary]], Optional[List[uint128]]]:
last_ses_hash, last_ses_sub_height = _get_last_ses_hash(constants, weight_proof.recent_chain_data)
if last_ses_hash is None:
log.warning("could not find last ses block")
return None, None
summaries, total, sub_epoch_weight_list = _map_sub_epoch_summaries(
constants.SUB_EPOCH_BLOCKS,
constants.GENESIS_CHALLENGE,
weight_proof.sub_epochs,
constants.DIFFICULTY_STARTING,
)
log.info(f"validating {len(summaries)} sub epochs")
# validate weight
if not _validate_summaries_weight(constants, total, summaries, weight_proof):
log.error("failed validating weight")
return None, None
last_ses = summaries[-1]
log.debug(f"last ses sub height {last_ses_sub_height}")
log.debug(f"last sub epoch summary hash {last_ses_hash}")
log.debug(f"last sub epoch summary hash {last_ses.get_hash()}")
# validate last ses_hash
if last_ses.get_hash() != last_ses_hash:
log.error(f"failed to validate ses hashes block height {last_ses_sub_height}. Returning summaries anyway...")
log.debug(f"last sub epoch summary hash {last_ses_hash}")
log.debug("now outputting summary hashes...")
for summary in summaries:
log.debug(f"summary hash: {summary.get_hash()}")
return summaries, sub_epoch_weight_list
return summaries, sub_epoch_weight_list
def _map_sub_epoch_summaries(
sub_blocks_for_se: uint32,
ses_hash: bytes32,
sub_epoch_data: List[SubEpochData],
curr_difficulty: uint64,
) -> Tuple[List[SubEpochSummary], uint128, List[uint128]]:
total_weight: uint128 = uint128(0)
summaries: List[SubEpochSummary] = []
sub_epoch_weight_list: List[uint128] = []
for idx, data in enumerate(sub_epoch_data):
ses = SubEpochSummary(
ses_hash,
data.reward_chain_hash,
data.num_blocks_overflow,
data.new_difficulty,
data.new_sub_slot_iters,
)
if idx < len(sub_epoch_data) - 1:
delta = 0
if idx > 0:
delta = sub_epoch_data[idx].num_blocks_overflow
log.debug(f"sub epoch {idx} start weight is {total_weight+curr_difficulty} ")
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
total_weight = total_weight + uint128( # type: ignore
curr_difficulty * (sub_blocks_for_se + sub_epoch_data[idx + 1].num_blocks_overflow - delta)
)
# if new epoch update diff and iters
if data.new_difficulty is not None:
curr_difficulty = data.new_difficulty
# add to dict
summaries.append(ses)
ses_hash = std_hash(ses)
# add last sub epoch weight
sub_epoch_weight_list.append(uint128(total_weight + curr_difficulty))
return summaries, total_weight, sub_epoch_weight_list
def _validate_summaries_weight(constants: ConsensusConstants, sub_epoch_data_weight, summaries, weight_proof) -> bool:
num_over = summaries[-1].num_blocks_overflow
ses_end_height = (len(summaries) - 1) * constants.SUB_EPOCH_BLOCKS + num_over - 1
curr = None
for block in weight_proof.recent_chain_data:
if block.reward_chain_block.height == ses_end_height:
curr = block
if curr is None:
return False
return curr.reward_chain_block.weight == sub_epoch_data_weight
def _validate_sub_epoch_segments(
constants_dict: Dict,
rng: random.Random,
weight_proof_bytes: bytes,
summaries_bytes: List[bytes],
):
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
sub_epoch_segments: SubEpochSegments = SubEpochSegments.from_bytes(weight_proof_bytes)
rc_sub_slot_hash = constants.GENESIS_CHALLENGE
total_blocks, total_ip_iters = 0, 0
total_slot_iters, total_slots = 0, 0
total_ip_iters = 0
prev_ses: Optional[SubEpochSummary] = None
segments_by_sub_epoch = map_segments_by_sub_epoch(sub_epoch_segments.challenge_segments)
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
vdfs_to_validate = []
for sub_epoch_n, segments in segments_by_sub_epoch.items():
prev_ssi = curr_ssi
curr_difficulty, curr_ssi = _get_curr_diff_ssi(constants, sub_epoch_n, summaries)
log.debug(f"validate sub epoch {sub_epoch_n}")
# recreate RewardChainSubSlot for next ses rc_hash
sampled_seg_index = rng.choice(range(len(segments)))
if sub_epoch_n > 0:
rc_sub_slot = __get_rc_sub_slot(constants, segments[0], summaries, curr_ssi)
prev_ses = summaries[sub_epoch_n - 1]
rc_sub_slot_hash = rc_sub_slot.get_hash()
if not summaries[sub_epoch_n].reward_chain_hash == rc_sub_slot_hash:
log.error(f"failed reward_chain_hash validation sub_epoch {sub_epoch_n}")
return False
for idx, segment in enumerate(segments):
valid_segment, ip_iters, slot_iters, slots, vdf_list = _validate_segment(
constants, segment, curr_ssi, prev_ssi, curr_difficulty, prev_ses, idx == 0, sampled_seg_index == idx
)
vdfs_to_validate.extend(vdf_list)
if not valid_segment:
log.error(f"failed to validate sub_epoch {segment.sub_epoch_n} segment {idx} slots")
return False
prev_ses = None
total_blocks += 1
total_slot_iters += slot_iters
total_slots += slots
total_ip_iters += ip_iters
return True, vdfs_to_validate
def _validate_segment(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
curr_ssi: uint64,
prev_ssi: uint64,
curr_difficulty: uint64,
ses: Optional[SubEpochSummary],
first_segment_in_se: bool,
sampled: bool,
) -> Tuple[bool, int, int, int, List[Tuple[VDFProof, ClassgroupElement, VDFInfo]]]:
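    """
    Summary of the logic below: validate one sub epoch challenge segment. For a sampled segment the
    challenge block's proof of space is verified and every VDF from the challenge block onward is
    collected for batch validation. Returns (valid, ip_iters, slot_iters, slots, vdfs_to_validate).
    """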
ip_iters, slot_iters, slots = 0, 0, 0
after_challenge = False
to_validate = []
for idx, sub_slot_data in enumerate(segment.sub_slots):
if sampled and sub_slot_data.is_challenge():
after_challenge = True
required_iters = __validate_pospace(constants, segment, idx, curr_difficulty, ses, first_segment_in_se)
if required_iters is None:
return False, uint64(0), uint64(0), uint64(0), []
assert sub_slot_data.signage_point_index is not None
ip_iters = ip_iters + calculate_ip_iters( # type: ignore
constants, curr_ssi, sub_slot_data.signage_point_index, required_iters
)
vdf_list = _get_challenge_block_vdfs(constants, idx, segment.sub_slots, curr_ssi)
to_validate.extend(vdf_list)
elif sampled and after_challenge:
validated, vdf_list = _validate_sub_slot_data(constants, idx, segment.sub_slots, curr_ssi)
if not validated:
log.error(f"failed to validate sub slot data {idx} vdfs")
return False, uint64(0), uint64(0), uint64(0), []
to_validate.extend(vdf_list)
slot_iters = slot_iters + curr_ssi # type: ignore
slots = slots + uint64(1) # type: ignore
return True, ip_iters, slot_iters, slots, to_validate
def _get_challenge_block_vdfs(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> List[Tuple[VDFProof, ClassgroupElement, VDFInfo]]:
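    """
    Summary of the logic below: collect (proof, input element, VDFInfo) tuples for the challenge
    block's challenge chain signage point and infusion point, adjusting the infusion point iterations
    relative to the previous sub slot data when the proof is not normalized to identity.
    """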
to_validate = []
sub_slot_data = sub_slots[sub_slot_idx]
if sub_slot_data.cc_signage_point is not None and sub_slot_data.cc_sp_vdf_info:
assert sub_slot_data.signage_point_index
sp_input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity and sub_slot_idx >= 1:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
prev_ssd = sub_slots[sub_slot_idx - 1]
sp_input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
to_validate.append((sub_slot_data.cc_signage_point, sp_input, sub_slot_data.cc_sp_vdf_info))
assert sub_slot_data.cc_infusion_point
assert sub_slot_data.cc_ip_vdf_info
ip_input = ClassgroupElement.get_default_element()
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and sub_slot_idx >= 1:
prev_ssd = sub_slots[sub_slot_idx - 1]
if prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
assert prev_ssd.total_iters
assert sub_slot_data.total_iters
ip_input = prev_ssd.cc_ip_vdf_info.output
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
to_validate.append((sub_slot_data.cc_infusion_point, ip_input, cc_ip_vdf_info))
return to_validate
def _validate_sub_slot_data(
constants: ConsensusConstants,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
ssi: uint64,
) -> Tuple[bool, List[Tuple[VDFProof, ClassgroupElement, VDFInfo]]]:
sub_slot_data = sub_slots[sub_slot_idx]
assert sub_slot_idx > 0
prev_ssd = sub_slots[sub_slot_idx - 1]
to_validate = []
if sub_slot_data.is_end_of_slot():
if sub_slot_data.icc_slot_end is not None:
input = ClassgroupElement.get_default_element()
if not sub_slot_data.icc_slot_end.normalized_to_identity and prev_ssd.icc_ip_vdf_info is not None:
assert prev_ssd.icc_ip_vdf_info
input = prev_ssd.icc_ip_vdf_info.output
assert sub_slot_data.icc_slot_end_info
to_validate.append((sub_slot_data.icc_slot_end, input, sub_slot_data.icc_slot_end_info))
assert sub_slot_data.cc_slot_end_info
assert sub_slot_data.cc_slot_end
input = ClassgroupElement.get_default_element()
if (not prev_ssd.is_end_of_slot()) and (not sub_slot_data.cc_slot_end.normalized_to_identity):
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
if not sub_slot_data.cc_slot_end.is_valid(constants, input, sub_slot_data.cc_slot_end_info):
log.error(f"failed cc slot end validation {sub_slot_data.cc_slot_end_info}")
return False, []
else:
# find end of slot
idx = sub_slot_idx
while idx < len(sub_slots) - 1:
curr_slot = sub_slots[idx]
if curr_slot.is_end_of_slot():
                # don't validate intermediate vdfs if slot is blue boxed
assert curr_slot.cc_slot_end
if curr_slot.cc_slot_end.normalized_to_identity is True:
log.debug(f"skip intermediate vdfs slot {sub_slot_idx}")
return True, to_validate
else:
break
idx += 1
if sub_slot_data.icc_infusion_point is not None and sub_slot_data.icc_ip_vdf_info is not None:
input = ClassgroupElement.get_default_element()
if not prev_ssd.is_challenge() and prev_ssd.icc_ip_vdf_info is not None:
input = prev_ssd.icc_ip_vdf_info.output
to_validate.append((sub_slot_data.icc_infusion_point, input, sub_slot_data.icc_ip_vdf_info))
assert sub_slot_data.signage_point_index is not None
if sub_slot_data.cc_signage_point:
assert sub_slot_data.cc_sp_vdf_info
input = ClassgroupElement.get_default_element()
if not sub_slot_data.cc_signage_point.normalized_to_identity:
is_overflow = is_overflow_block(constants, sub_slot_data.signage_point_index)
input = sub_slot_data_vdf_input(
constants, sub_slot_data, sub_slot_idx, sub_slots, is_overflow, prev_ssd.is_end_of_slot(), ssi
)
to_validate.append((sub_slot_data.cc_signage_point, input, sub_slot_data.cc_sp_vdf_info))
input = ClassgroupElement.get_default_element()
assert sub_slot_data.cc_ip_vdf_info
assert sub_slot_data.cc_infusion_point
cc_ip_vdf_info = sub_slot_data.cc_ip_vdf_info
if not sub_slot_data.cc_infusion_point.normalized_to_identity and prev_ssd.cc_slot_end is None:
assert prev_ssd.cc_ip_vdf_info
input = prev_ssd.cc_ip_vdf_info.output
assert sub_slot_data.total_iters
assert prev_ssd.total_iters
ip_vdf_iters = uint64(sub_slot_data.total_iters - prev_ssd.total_iters)
cc_ip_vdf_info = VDFInfo(
sub_slot_data.cc_ip_vdf_info.challenge, ip_vdf_iters, sub_slot_data.cc_ip_vdf_info.output
)
to_validate.append((sub_slot_data.cc_infusion_point, input, cc_ip_vdf_info))
return True, to_validate
def sub_slot_data_vdf_input(
constants: ConsensusConstants,
sub_slot_data: SubSlotData,
sub_slot_idx: int,
sub_slots: List[SubSlotData],
is_overflow: bool,
new_sub_slot: bool,
ssi: uint64,
) -> ClassgroupElement:
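    """
    Summary of the logic below: determine the classgroup input element for the challenge chain signage
    point VDF of ``sub_slot_data`` by scanning backwards through ``sub_slots`` for the block whose
    infusion point output feeds this signage point; falls back to the default element when no such
    block precedes it within the relevant slot(s).
    """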
cc_input = ClassgroupElement.get_default_element()
sp_total_iters = get_sp_total_iters(constants, is_overflow, ssi, sub_slot_data)
ssd: Optional[SubSlotData] = None
if is_overflow and new_sub_slot:
if sub_slot_idx >= 2:
if sub_slots[sub_slot_idx - 2].cc_slot_end_info is None:
for ssd_idx in reversed(range(0, sub_slot_idx - 1)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
if ssd and ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not is_overflow and not new_sub_slot:
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
ssd = sub_slots[ssd_idx + 1]
break
if not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
elif not new_sub_slot and is_overflow:
slots_seen = 0
for ssd_idx in reversed(range(0, sub_slot_idx)):
ssd = sub_slots[ssd_idx]
if ssd.cc_slot_end_info is not None:
slots_seen += 1
if slots_seen == 2:
return ClassgroupElement.get_default_element()
if ssd.cc_slot_end_info is None and not (ssd.total_iters > sp_total_iters):
break
assert ssd is not None
if ssd.cc_ip_vdf_info is not None:
if ssd.total_iters < sp_total_iters:
cc_input = ssd.cc_ip_vdf_info.output
return cc_input
def validate_recent_blocks(
constants: ConsensusConstants,
recent_chain: RecentChainData,
summaries: List[SubEpochSummary],
shutdown_file_path: Optional[pathlib.Path] = None,
) -> Tuple[bool, List[bytes]]:
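    """
    Summary of the logic below: replay and validate the recent chain section of the weight proof,
    tracking sub slot iterations, difficulty and deficit. Once enough sub slots and transaction blocks
    have been seen, the last ``last_blocks_to_validate`` blocks are fully validated; earlier blocks
    only get a proof of space check. Returns (success, serialized block records).
    """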
sub_blocks = BlockCache({})
first_ses_idx = _get_ses_idx(recent_chain.recent_chain_data)
ses_idx = len(summaries) - len(first_ses_idx)
ssi: uint64 = constants.SUB_SLOT_ITERS_STARTING
diff: uint64 = constants.DIFFICULTY_STARTING
last_blocks_to_validate = 100 # todo remove cap after benchmarks
for summary in summaries[:ses_idx]:
if summary.new_sub_slot_iters is not None:
ssi = summary.new_sub_slot_iters
if summary.new_difficulty is not None:
diff = summary.new_difficulty
ses_blocks, sub_slots, transaction_blocks = 0, 0, 0
challenge, prev_challenge = recent_chain.recent_chain_data[0].reward_chain_block.pos_ss_cc_challenge_hash, None
tip_height = recent_chain.recent_chain_data[-1].height
prev_block_record = None
deficit = uint8(0)
adjusted = False
for idx, block in enumerate(recent_chain.recent_chain_data):
required_iters = uint64(0)
overflow = False
ses = False
height = block.height
for sub_slot in block.finished_sub_slots:
prev_challenge = challenge
challenge = sub_slot.challenge_chain.get_hash()
deficit = sub_slot.reward_chain.deficit
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
ses = True
assert summaries[ses_idx].get_hash() == sub_slot.challenge_chain.subepoch_summary_hash
ses_idx += 1
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
ssi = sub_slot.challenge_chain.new_sub_slot_iters
if sub_slot.challenge_chain.new_difficulty is not None:
diff = sub_slot.challenge_chain.new_difficulty
if (challenge is not None) and (prev_challenge is not None):
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
if not adjusted:
prev_block_record = dataclasses.replace(
prev_block_record, deficit=deficit % constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
assert prev_block_record is not None
sub_blocks.add_block_record(prev_block_record)
adjusted = True
deficit = get_deficit(constants, deficit, prev_block_record, overflow, len(block.finished_sub_slots))
log.debug(f"wp, validate block {block.height}")
if sub_slots > 2 and transaction_blocks > 11 and (tip_height - block.height < last_blocks_to_validate):
            calculated_required_iters, _, error = validate_finished_header_block(
constants, sub_blocks, block, False, diff, ssi, ses_blocks > 2
)
if error is not None:
log.error(f"block {block.header_hash} failed validation {error}")
return False, []
            assert calculated_required_iters is not None
            required_iters = calculated_required_iters
else:
required_iters = _validate_pospace_recent_chain(
constants, block, challenge, diff, overflow, prev_challenge
)
if required_iters is None:
return False, []
curr_block_ses = None if not ses else summaries[ses_idx - 1]
block_record = header_block_to_sub_block_record(
constants, required_iters, block, ssi, overflow, deficit, height, curr_block_ses
)
log.debug(f"add block {block_record.height} to tmp sub blocks")
sub_blocks.add_block_record(block_record)
if block.first_in_sub_slot:
sub_slots += 1
if block.is_transaction_block:
transaction_blocks += 1
if ses:
ses_blocks += 1
prev_block_record = block_record
if shutdown_file_path is not None and not shutdown_file_path.is_file():
log.info(f"cancelling block {block.header_hash} validation, shutdown requested")
return False, []
return True, [bytes(sub) for sub in sub_blocks._block_records.values()]
def _validate_recent_blocks(constants_dict: Dict, recent_chain_bytes: bytes, summaries_bytes: List[bytes]) -> bool:
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
success, records = validate_recent_blocks(constants, recent_chain, summaries)
return success
def _validate_recent_blocks_and_get_records(
constants_dict: Dict,
recent_chain_bytes: bytes,
summaries_bytes: List[bytes],
shutdown_file_path: Optional[pathlib.Path] = None,
) -> Tuple[bool, List[bytes]]:
constants, summaries = bytes_to_vars(constants_dict, summaries_bytes)
recent_chain: RecentChainData = RecentChainData.from_bytes(recent_chain_bytes)
return validate_recent_blocks(constants, recent_chain, summaries, shutdown_file_path)
def _validate_pospace_recent_chain(
constants: ConsensusConstants,
block: HeaderBlock,
challenge: bytes32,
diff: uint64,
overflow: bool,
prev_challenge: bytes32,
):
if block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
assert cc_sp_hash is not None
q_str = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants,
challenge if not overflow else prev_challenge,
cc_sp_hash,
)
if q_str is None:
log.error(f"could not verify proof of space block {block.height} {overflow}")
return None
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
diff,
0.5,
cc_sp_hash,
)
return required_iters
def __validate_pospace(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
idx: int,
curr_diff: uint64,
ses: Optional[SubEpochSummary],
first_in_sub_epoch: bool,
) -> Optional[uint64]:
if first_in_sub_epoch and segment.sub_epoch_n == 0 and idx == 0:
cc_sub_slot_hash = constants.GENESIS_CHALLENGE
else:
cc_sub_slot_hash = __get_cc_sub_slot(segment.sub_slots, idx, ses).get_hash()
sub_slot_data: SubSlotData = segment.sub_slots[idx]
if sub_slot_data.signage_point_index and is_overflow_block(constants, sub_slot_data.signage_point_index):
curr_slot = segment.sub_slots[idx - 1]
assert curr_slot.cc_slot_end_info
challenge = curr_slot.cc_slot_end_info.challenge
else:
challenge = cc_sub_slot_hash
if sub_slot_data.cc_sp_vdf_info is None:
cc_sp_hash = cc_sub_slot_hash
else:
cc_sp_hash = sub_slot_data.cc_sp_vdf_info.output.get_hash()
# validate proof of space
assert sub_slot_data.proof_of_space is not None
q_str = sub_slot_data.proof_of_space.verify_and_get_quality_string(
constants,
challenge,
cc_sp_hash,
)
if q_str is None:
log.error("could not verify proof of space")
return None
return calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
sub_slot_data.proof_of_space.size,
curr_diff,
0.5,
cc_sp_hash,
)
def __get_rc_sub_slot(
constants: ConsensusConstants,
segment: SubEpochChallengeSegment,
summaries: List[SubEpochSummary],
curr_ssi: uint64,
) -> RewardChainSubSlot:
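    """
    Summary of the logic below: rebuild the RewardChainSubSlot preceding the first challenge block of
    ``segment`` so that its hash can be checked against the sub epoch summary's reward_chain_hash.
    """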
ses = summaries[uint32(segment.sub_epoch_n - 1)]
# find first challenge in sub epoch
first_idx = None
first = None
for idx, curr in enumerate(segment.sub_slots):
if curr.cc_slot_end is None:
first_idx = idx
first = curr
break
assert first_idx
idx = first_idx
slots = segment.sub_slots
# number of slots to look for
slots_n = 1
assert first
assert first.signage_point_index is not None
if is_overflow_block(constants, first.signage_point_index):
if idx >= 2 and slots[idx - 2].cc_slot_end is None:
slots_n = 2
new_diff = None if ses is None else ses.new_difficulty
new_ssi = None if ses is None else ses.new_sub_slot_iters
ses_hash = None if ses is None else ses.get_hash()
overflow = is_overflow_block(constants, first.signage_point_index)
if overflow:
if idx >= 2 and slots[idx - 2].cc_slot_end is not None and slots[idx - 1].cc_slot_end is not None:
ses_hash = None
new_ssi = None
new_diff = None
sub_slot = slots[idx]
while True:
if sub_slot.cc_slot_end:
slots_n -= 1
if slots_n == 0:
break
idx -= 1
sub_slot = slots[idx]
icc_sub_slot_hash: Optional[bytes32] = None
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
assert segment.rc_slot_end_info is not None
if idx != 0:
cc_vdf_info = VDFInfo(sub_slot.cc_slot_end_info.challenge, curr_ssi, sub_slot.cc_slot_end_info.output)
if sub_slot.icc_slot_end_info is not None:
icc_slot_end_info = VDFInfo(
sub_slot.icc_slot_end_info.challenge, curr_ssi, sub_slot.icc_slot_end_info.output
)
icc_sub_slot_hash = icc_slot_end_info.get_hash()
else:
cc_vdf_info = sub_slot.cc_slot_end_info
if sub_slot.icc_slot_end_info is not None:
icc_sub_slot_hash = sub_slot.icc_slot_end_info.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf_info,
icc_sub_slot_hash,
ses_hash,
new_ssi,
new_diff,
)
rc_sub_slot = RewardChainSubSlot(
segment.rc_slot_end_info,
cc_sub_slot.get_hash(),
icc_sub_slot_hash,
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return rc_sub_slot
def __get_cc_sub_slot(sub_slots: List[SubSlotData], idx, ses: Optional[SubEpochSummary]) -> ChallengeChainSubSlot:
sub_slot: Optional[SubSlotData] = None
for i in reversed(range(0, idx)):
sub_slot = sub_slots[i]
if sub_slot.cc_slot_end_info is not None:
break
assert sub_slot is not None
assert sub_slot.cc_slot_end_info is not None
icc_vdf = sub_slot.icc_slot_end_info
icc_vdf_hash: Optional[bytes32] = None
if icc_vdf is not None:
icc_vdf_hash = icc_vdf.get_hash()
cc_sub_slot = ChallengeChainSubSlot(
sub_slot.cc_slot_end_info,
icc_vdf_hash,
None if ses is None else ses.get_hash(),
None if ses is None else ses.new_sub_slot_iters,
None if ses is None else ses.new_difficulty,
)
return cc_sub_slot
def _get_curr_diff_ssi(constants: ConsensusConstants, idx, summaries):
curr_difficulty = constants.DIFFICULTY_STARTING
curr_ssi = constants.SUB_SLOT_ITERS_STARTING
for ses in reversed(summaries[0:idx]):
if ses.new_sub_slot_iters is not None:
curr_ssi = ses.new_sub_slot_iters
curr_difficulty = ses.new_difficulty
break
return curr_difficulty, curr_ssi
def vars_to_bytes(constants: ConsensusConstants, summaries: List[SubEpochSummary], weight_proof: WeightProof):
constants_dict = recurse_jsonify(dataclasses.asdict(constants))
wp_recent_chain_bytes = bytes(RecentChainData(weight_proof.recent_chain_data))
wp_segment_bytes = bytes(SubEpochSegments(weight_proof.sub_epoch_segments))
summary_bytes = []
for summary in summaries:
summary_bytes.append(bytes(summary))
return constants_dict, summary_bytes, wp_segment_bytes, wp_recent_chain_bytes
def bytes_to_vars(constants_dict, summaries_bytes):
summaries = []
for summary in summaries_bytes:
summaries.append(SubEpochSummary.from_bytes(summary))
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
return constants, summaries
def _get_last_ses_hash(
constants: ConsensusConstants, recent_reward_chain: List[HeaderBlock]
) -> Tuple[Optional[bytes32], uint32]:
for idx, block in enumerate(reversed(recent_reward_chain)):
log.debug(f"looking at reward block height: {block.reward_chain_block.height}")
if (block.reward_chain_block.height % constants.SUB_EPOCH_BLOCKS) == 0:
idx = len(recent_reward_chain) - 1 - idx # reverse
# find first block after sub slot end
while idx < len(recent_reward_chain):
curr = recent_reward_chain[idx]
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
log.debug(f"found ses hash at reward block height: {block.reward_chain_block.height}")
log.debug(f"reward chain block height is {curr.reward_chain_block.height}")
log.debug(f"ses hash is: {slot.challenge_chain.subepoch_summary_hash}")
return (
slot.challenge_chain.subepoch_summary_hash,
curr.reward_chain_block.height,
)
idx += 1
return None, uint32(0)
def _get_ses_idx(recent_reward_chain: List[HeaderBlock]) -> List[int]:
idxs: List[int] = []
for idx, curr in enumerate(recent_reward_chain):
if len(curr.finished_sub_slots) > 0:
for slot in curr.finished_sub_slots:
if slot.challenge_chain.subepoch_summary_hash is not None:
idxs.append(idx)
return idxs
def get_deficit(
constants: ConsensusConstants,
curr_deficit: uint8,
prev_block: Optional[BlockRecord],
overflow: bool,
num_finished_sub_slots: int,
) -> uint8:
if prev_block is None:
if curr_deficit >= 1 and not (overflow and curr_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK):
curr_deficit = uint8(curr_deficit - 1)
return curr_deficit
return calculate_deficit(constants, uint32(prev_block.height + 1), prev_block, overflow, num_finished_sub_slots)
def get_sp_total_iters(constants: ConsensusConstants, is_overflow: bool, ssi: uint64, sub_slot_data: SubSlotData):
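    """
    Summary of the logic below: total iterations at the block's signage point, i.e. the start of its
    signage point sub slot plus sp_iters (for overflow blocks the sub slot start is one full sub slot earlier).
    """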
assert sub_slot_data.cc_ip_vdf_info is not None
assert sub_slot_data.total_iters is not None
assert sub_slot_data.signage_point_index is not None
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
if is_overflow:
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
return sp_sub_slot_total_iters + sp_iters
def blue_boxed_end_of_slot(sub_slot: EndOfSubSlotBundle):
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None:
if sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity:
return True
else:
return True
return False
def validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof):
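    """
    Summary of the logic below: check that the weight proof contains challenge segments for every sub
    epoch selected by the weight-based sampling (capped at WeightProofHandler.MAX_SAMPLES).
    """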
tip = weight_proof.recent_chain_data[-1]
weight_to_check = _get_weights_for_sampling(rng, tip.weight, weight_proof.recent_chain_data)
sampled_sub_epochs: dict[int, bool] = {}
for idx in range(1, len(sub_epoch_weight_list)):
if _sample_sub_epoch(sub_epoch_weight_list[idx - 1], sub_epoch_weight_list[idx], weight_to_check):
sampled_sub_epochs[idx - 1] = True
if len(sampled_sub_epochs) == WeightProofHandler.MAX_SAMPLES:
break
curr_sub_epoch_n = -1
for sub_epoch_segment in weight_proof.sub_epoch_segments:
if curr_sub_epoch_n < sub_epoch_segment.sub_epoch_n:
if sub_epoch_segment.sub_epoch_n in sampled_sub_epochs:
del sampled_sub_epochs[sub_epoch_segment.sub_epoch_n]
curr_sub_epoch_n = sub_epoch_segment.sub_epoch_n
if len(sampled_sub_epochs) > 0:
return False
return True
def map_segments_by_sub_epoch(sub_epoch_segments) -> Dict[int, List[SubEpochChallengeSegment]]:
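    """
    Group challenge segments by their sub_epoch_n, preserving their original order.
    """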
segments: Dict[int, List[SubEpochChallengeSegment]] = {}
curr_sub_epoch_n = -1
for idx, segment in enumerate(sub_epoch_segments):
if curr_sub_epoch_n < segment.sub_epoch_n:
curr_sub_epoch_n = segment.sub_epoch_n
segments[curr_sub_epoch_n] = []
segments[curr_sub_epoch_n].append(segment)
return segments
def validate_total_iters(
segment: SubEpochChallengeSegment,
sub_slot_data_idx,
expected_sub_slot_iters: uint64,
finished_sub_slots_since_prev: int,
prev_b: SubSlotData,
prev_sub_slot_data_iters,
genesis,
) -> bool:
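    """
    Summary of the logic below: recompute the expected total_iters for the sub slot data at
    ``sub_slot_data_idx`` from the previous block (or from empty slots for the genesis case) and
    compare it with the stored value.
    """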
sub_slot_data = segment.sub_slots[sub_slot_data_idx]
if genesis:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
elif segment.sub_slots[sub_slot_data_idx - 1].is_end_of_slot():
assert prev_b.total_iters
assert prev_b.cc_ip_vdf_info
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_sub_slot_data_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
assert prev_b.cc_ip_vdf_info
assert prev_b.total_iters
total_iters = uint128(prev_b.total_iters - prev_b.cc_ip_vdf_info.number_of_iterations)
total_iters = uint128(total_iters + sub_slot_data.cc_ip_vdf_info.number_of_iterations)
return total_iters == sub_slot_data.total_iters
def _validate_vdf_batch(
constants_dict, vdf_list: List[Tuple[bytes, bytes, bytes]], shutdown_file_path: Optional[pathlib.Path] = None
):
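    """
    Summary of the logic below: deserialize and validate a batch of (VDFProof, ClassgroupElement, VDFInfo)
    triples. Returns False on the first invalid proof or when the shutdown sentinel file is removed.
    """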
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
for vdf_proof_bytes, class_group_bytes, info in vdf_list:
vdf = VDFProof.from_bytes(vdf_proof_bytes)
class_group = ClassgroupElement.from_bytes(class_group_bytes)
vdf_info = VDFInfo.from_bytes(info)
if not vdf.is_valid(constants, class_group, vdf_info):
return False
if shutdown_file_path is not None and not shutdown_file_path.is_file():
log.info("cancelling VDF validation, shutdown requested")
return False
return True
| 42.407895 | 120 | 0.673935 |
8345a944ee976ab90089406f246bb3ea238a3e3b | 1,587 | py | Python | src/Plugin/HtmlWrapper.py | eblade/pyroman | a544eb2e13b45a20e4f11b28c7b349e27594452e | ["MIT"] | null | null | null | src/Plugin/HtmlWrapper.py | eblade/pyroman | a544eb2e13b45a20e4f11b28c7b349e27594452e | ["MIT"] | null | null | null | src/Plugin/HtmlWrapper.py | eblade/pyroman | a544eb2e13b45a20e4f11b28c7b349e27594452e | ["MIT"] | null | null | null |
#!/usr/bin/python
import G
from Utils import getkey
from Generic import Generic
class HtmlWrapper(Generic):
def __init__(self):
super(HtmlWrapper, self).__init__()
self.init()
def post_upgrade(self):
# Register me as wrapper
G.info("Registering wrapper")
self.globalvars['$Wrapper'] = self
return
## @fn process
#
# Processes the template with data from the hashes. Builds a text representation
# of the object using the templates.
def process(self):
template_name = getkey(self.arguments, 'template', self.object_name)
template = getkey(self.globalvars['$Templates'], template_name)
self.body = getkey(template, 'wrapper')
self.pre_process()
# Insert localvars
for k in self.localvars:
G.debug(''.join(['Replacing localvar: ',str(k),'.']))
self.body = self.body.replace(''.join(['%',str(k),'%']),unicode(self.localvars[k]))
# Insert arguments
for k in self.arguments:
G.debug(''.join(['Replacing argument: ',str(k),'.']))
self.body = self.body.replace(''.join(['%',str(k),'%']),unicode(self.arguments[k]))
# Insert globalvars
for k in self.globalvars:
            if not k[0] == '$': # keys starting with '$' hold non-string values and are skipped
G.debug(''.join(['Replacing globalvar: ',str(k),'.']))
self.body = self.body.replace(''.join(['%',str(k),'%']),unicode(self.globalvars[k]))
# Insert content
G.debug(''.join(['Calculated body: ', self.body]))
| 36.906977 | 101 | 0.58034 |
051bc3eaf18cff089ebf2f67bc8784a7ed688138 | 75 | py | Python | django-html5-boilerplate/__init__.py | damianmoore/django-html5-boilerplate | 7e99f60c5bb73c8a45b1dc3dbbe2caf8c120ee47 | ["BSD-2-Clause"] | 3 | 2019-08-16T13:38:33.000Z | 2021-12-15T04:00:04.000Z | django-html5-boilerplate/__init__.py | damianmoore/django-html5-boilerplate | 7e99f60c5bb73c8a45b1dc3dbbe2caf8c120ee47 | ["BSD-2-Clause"] | null | null | null | django-html5-boilerplate/__init__.py | damianmoore/django-html5-boilerplate | 7e99f60c5bb73c8a45b1dc3dbbe2caf8c120ee47 | ["BSD-2-Clause"] | null | null | null |
# Copyright (c) 2011, Damian Moore
# see LICENSE file for license details
| 18.75
| 38
| 0.746667
|
a04ed21c9e79c29a571b79835ada26a1632944f9
| 5,305
|
py
|
Python
|
get_metadata_from_oracle.py
|
Neil-Symington/borehole_gfx_utils
|
bfcf37fa07621a3234bb57be264f15540ce95ec5
|
[
"Apache-2.0"
] | null | null | null |
get_metadata_from_oracle.py
|
Neil-Symington/borehole_gfx_utils
|
bfcf37fa07621a3234bb57be264f15540ce95ec5
|
[
"Apache-2.0"
] | null | null | null |
get_metadata_from_oracle.py
|
Neil-Symington/borehole_gfx_utils
|
bfcf37fa07621a3234bb57be264f15540ce95ec5
|
[
"Apache-2.0"
] | null | null | null |
# Script that populates columns and rows in the master spreadsheet using data within another spreadsheet extracted
# from the oracle rock properties database. This script assumes that the master spreadsheet already contains the borehole
# enos (or UWI), which is required for the oracle query.
# Note that, as the data in Oracle is in variable condition, we don't recommend simply clicking run on this script;
# the processing will need to be specific to the data.
import pandas as pd
import lasio
import numpy as np
import yaml
# load the required fields from the yaml file
yaml_file = "lasfile_required_fields.yaml"
fields = yaml.safe_load(open(yaml_file))['metadata_fields']
## TODO include a sql query for extracting the oracle data
oracle_sheet = '/home/nsymington/Documents/GA/inductionGamma/EK/induction_gamma_header.csv'
df_or = pd.read_csv(oracle_sheet)
master_spreadsheet = 'EK_induction_master.csv'
df_master = pd.read_csv(master_spreadsheet, keep_default_na=False)
# Now extract the ones that have been assigned as essential as well as their data types
essential_fields = {}
keys = ['COMP', 'DATE', 'X', 'Y', 'GDAT', 'HZCS', 'LMF', 'APD', 'STRT', 'STOP', "NULL"]
def extract_step_from_las(las):
"""
:param las: object
las class from lasio
:return:
the depth interval or STEP
"""
intervals = las['DEPT'][1:] - las['DEPT'][:-1]
    # due to floating point errors we will round it to 3 decimal places and find the unique value
return np.unique(np.round(intervals, 3))[0]
for item in keys:
# Convert the data type to a numpy class
if isinstance(fields[item]['dtype'], (list, tuple)):
essential_fields[item] = tuple([getattr(np, d) for d in fields[item]['dtype']])
elif isinstance(fields[item]['dtype'], (str)):
essential_fields[item] = (getattr(np,fields[item]['dtype']))
# Iterate through our master sheet and get the necessary field from the las file
for index, row in df_master.iterrows():
print(row['WELL'])
    # First we populate the fields that are already available in the las file header
las_path = row['local_path']
las = lasio.read(las_path)
# Extract the step since this is always wrong
step = extract_step_from_las(las)
df_master.at[index, 'STEP'] = step
for item in keys:
try:
header_item = getattr(las.well, item)
value = header_item.value
# Check data types
if not isinstance(value, (essential_fields[item])):
print("Check data type for {}".format(item))
# For cases where the metadata value is an empty string
if len(str(value)) > 0:
df_master.at[index, item] = value
else:
pass
except AttributeError:
pass
# Get the necessary field from the oracle spreadsheet. Since the db is our 'point of truth' we will overwrite
# any of the x, y, z fields that are in the las file header
df_merged = df_master.merge(df_or, left_on = ['UWI', 'DEPTH_REFERENCE_ID'],
right_on = ['ENO', 'DEPTH_REFERENCE_TYPE_ID'])
# Unless there are nulls we will replace the X values in the master spreadsheet
xmask = pd.notnull(df_merged['ORIG_X_LONGITUDE'])
df_master.at[xmask, 'X'] = df_merged[xmask]['ORIG_X_LONGITUDE'].values
ymask = pd.notnull(df_merged['ORIG_Y_LATITUDE'])
df_master.at[ymask, 'Y'] = df_merged[ymask]['ORIG_Y_LATITUDE'].values
gdat_mask = pd.notnull(df_merged["ELEVATION_DATUM"])
df_master.at[gdat_mask, 'GDAT'] = df_merged[gdat_mask]["ELEVATION_DATUM"].values
hzcs_mask = pd.notnull(df_merged["ORIG_INPUT_LOCATION_DATUM"])
df_master.at[hzcs_mask, 'HZCS'] = df_merged[hzcs_mask]['ORIG_INPUT_LOCATION_DATUM'].values
# Now add the depth reference information
dr_mask = pd.notnull(df_merged['DEPTH_REFERENCE_HEIGHT'])
# Do some corrections on these heights to convert all to metres above ground
# get the unit of measurements
drd_uom = df_merged["DEPTH_REFERENCE_UOM"]
mm_mask = drd_uom == 'mm'
# drop column to reduce the risk of error
df_merged.drop(columns = "DEPTH_REFERENCE_UOM", inplace = True)
# Use the mask to convert to m
df_merged.at[mm_mask, "DEPTH_REFERENCE_HEIGHT"] = df_merged[mm_mask]['DEPTH_REFERENCE_HEIGHT'] * np.power(10,-3.)
# Convert elevation to mAHD
elev_uom = df_merged["ELEVATION_UOM"]
elev_mm_mask = elev_uom == 'mm'
# drop column to reduce the risk of error
df_merged.drop(columns = "ELEVATION_UOM",inplace = True)
df_merged.at[elev_mm_mask, "ELEVATION_VALUE"] = df_merged["ELEVATION_VALUE"] * np.power(10,-3.)
# Find where the depth reference is relative to the geoid and subtract
depth_ref_datum = df_merged['DEPTH_REFERENCE_DATUM']
ahd_mask = depth_ref_datum == "Australian height datum"
df_merged.at[ahd_mask, "DEPTH_REFERENCE_HEIGHT"] = df_merged[ahd_mask]['DEPTH_REFERENCE_HEIGHT'] - df_merged["ELEVATION_VALUE"][ahd_mask]
# drop column to reduce the risk of error
df_merged.drop(columns = "DEPTH_REFERENCE_DATUM",inplace = True)
# Add to master spreadsheet
df_master.at[dr_mask, 'LMF'] = df_merged[dr_mask]['DEPTH_REFERENCE_TYPE'] # reference from
df_master.at[dr_mask, 'APD'] = df_merged[dr_mask]['DEPTH_REFERENCE_HEIGHT'] # reference height
df_master.at[dr_mask, 'EPD'] = df_merged[dr_mask]['ELEVATION_VALUE']
df_master.to_csv('EK_induction_master_metadata.csv')
| 38.442029 | 137 | 0.719698 |
dfd076ae5ee5d5044837048b340d5926ad85a054 | 1,340 | py | Python | fdm-devito-notebooks/01_vib/exer-vib/vib_memsave0.py | devitocodes/devito_book | 30405c3d440a1f89df69594fd0704f69650c1ded | ["CC-BY-4.0"] | 7 | 2020-07-17T13:19:15.000Z | 2021-03-27T05:21:09.000Z | fdm-jupyter-book/notebooks/01_vib/exer-vib/vib_memsave0.py | devitocodes/devito_book | 30405c3d440a1f89df69594fd0704f69650c1ded | ["CC-BY-4.0"] | 73 | 2020-07-14T15:38:52.000Z | 2020-09-25T11:54:59.000Z | fdm-jupyter-book/notebooks/01_vib/exer-vib/vib_memsave0.py | devitocodes/devito_book | 30405c3d440a1f89df69594fd0704f69650c1ded | ["CC-BY-4.0"] | 1 | 2021-03-27T05:21:14.000Z | 2021-03-27T05:21:14.000Z |
import sys, os
sys.path.insert(0, os.path.join(os.pardir, 'src-vib'))
import numpy as np
import matplotlib.pyplot as plt
def solver_memsave(I, w, dt, T, filename='tmp.dat'):
"""
As vib_undamped.solver, but store only the last three
    u values in the implementation. The solution is written to
    the file given by `filename` (default `tmp.dat`).
Solve u'' + w**2*u = 0 for t in (0,T], u(0)=I and u'(0)=0,
by a central finite difference method with time step dt.
"""
dt = float(dt)
Nt = int(round(T/dt))
t = np.linspace(0, Nt*dt, Nt+1)
outfile = open(filename, 'w')
u_n = I
outfile.write('%20.12f %20.12f\n' % (0, u_n))
u = u_n - 0.5*dt**2*w**2*u_n
outfile.write('%20.12f %20.12f\n' % (dt, u))
u_nm1 = u_n
u_n = u
for n in range(1, Nt):
u = 2*u_n - u_nm1 - dt**2*w**2*u_n
outfile.write('%20.12f %20.12f\n' % (t[n], u))
u_nm1 = u_n
u_n = u
return u, t
def test_solver_memsave():
from vib_undamped import solver
_, _ = solver_memsave(I=1, dt=0.1, w=1, T=30)
u_expected, _ = solver (I=1, dt=0.1, w=1, T=30)
data = np.loadtxt('tmp.dat')
u_computed = data[:,1]
diff = np.abs(u_expected - u_computed).max()
assert diff < 5E-13, diff
if __name__ == '__main__':
test_solver_memsave()
solver_memsave(I=1, w=1, dt=0.1, T=30)
| 29.130435 | 62 | 0.584328 |
94d723b5a4ea12882027cd8aee23b67ed1a3d6dd | 21 | py | Python | python/testData/quickFixes/PyAddSpecifierToFormatQuickFixTest/missingValues_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/quickFixes/PyAddSpecifierToFormatQuickFixTest/missingValues_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/quickFixes/PyAddSpecifierToFormatQuickFixTest/missingValues_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z |
a = "%s test %" % "x"
| 21 | 21 | 0.333333 |
7ee356281dfdddd7301080229b7825d43ecb404b | 43,519 | py | Python | Configs/CI_Configs_with_datasets_before_scoretables.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | ["MIT"] | 1 | 2022-01-17T13:13:02.000Z | 2022-01-17T13:13:02.000Z | Configs/CI_Configs_with_datasets_before_scoretables.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | ["MIT"] | null | null | null | Configs/CI_Configs_with_datasets_before_scoretables.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | ["MIT"] | null | null | null |
import pandas as pd
import pickle
import os
import numpy as np
import sys
import UKBB_Functions as UKBBFF
from sklearn.metrics import roc_auc_score, make_scorer,brier_score_loss,log_loss
from sklearn.metrics import fbeta_score
class runs:
def __init__(self,run_name,only_check_model=False):
self.run_name=run_name
self.score = make_scorer(roc_auc_score, needs_proba=True)
self.class_weight="balanced"
self.hyper_parameter_iter = 200
self.num_of_bootstraps = 1000
self.batch_size=10
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All"}
self.mode=None
self.choose_model()
self.compute_CI = True
self.charac_id= {"Age at last visit": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
"Type of special diet followed": "20086-0.0"}
self.new_features_file_path_for_testing = None
self.model_paths = None
self.exist_CI_files=[]
self.missing_files=[]
self.Val_file_path = None
        try:  # Not all models have self.mode, hence the try expression
# Use self.mode=="Exploring" when you want to explore on the training data without looking at validation data
if self.mode == "Exploring" or self.mode == "exploring" or self.mode == "explore": # Exploring so not using real val data
self.Train_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv"
self.Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv"
self.Train_Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv"
self.Val_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv"
else:
self.Train_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv"
self.Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv"
self.Train_Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train_test.csv"
self.Val_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_val.csv"
except:
print("Using true validation data")
self.Train_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv"
self.Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv"
self.Train_Test_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train_test.csv"
        if self.Val_file_path is None:
print("Using general Val file path")
self.Val_file_path = "/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_val.csv"
if self.model_type=="SA" or self.model_type=="LR":
self.model_paths=os.path.join(self.Folder_path,self.run_name)
self.CI_results_path = os.path.join(self.model_paths, "CI")
if self.model_type=="gbdt" or self.model_type=="GBDT":
self.job_name = "Diabetes"
self.model_paths = os.path.join(self.Folder_path, self.run_name + "_" + self.job_name)
self.final_folder = os.path.join(self.model_paths, "Diabetes_Results")
self.CI_results_path = os.path.join(self.final_folder, "CI")
self.VERBOSE_EVAL = 1000
if not only_check_model:
self.Set_GBDT()
self.CI_results_path = os.path.join(self.model_paths, "Diabetes_Results/CI")
self.Training_path = os.path.join(self.model_paths, "training_Results")
self.CI_results_summary_table = os.path.join(self.CI_results_path, self.run_name + "_CI.csv")
self.hyper_parameters_summary_table = os.path.join(self.CI_results_path,
self.run_name + "_hyper_parameters_summary.csv")
self.save_model = True
if self.model_type=="LR":
self.save_model_filename = os.path.join(self.Folder_path, self.run_name, "LR_Model.sav")
if self.hyper_parameter_iter == []:
            self.hyper_parameter_iter = 200
if self.num_of_bootstraps == []:
self.num_of_bootstraps=1000
if self.batch_size>self.num_of_bootstraps:
self.batch_size=self.num_of_bootstraps
self.create_dir()
self.check_exist_CI_files()
def choose_model(self):
if self.run_name == "Val_LR_Socio_demographics" \
or self.run_name == "Val_LR_Age_and_Sex" \
or self.run_name == "Val_LR_Physical_health" \
or self.run_name == "Val_LR_Mental_health" \
or self.run_name == "Val_LR_Medication" \
or self.run_name == "Val_LR_Lifestyle_and_physical_activity" \
or self.run_name == "Val_LR_HbA1c" \
or self.run_name == "Val_LR_Family_and_Ethnicity" \
or self.run_name == "Val_LR_Early_Life_Factors" \
or self.run_name == "Val_LR_Diet" \
or self.run_name == "Val_LR_BT_No_A1c_No_Gluc" \
or self.run_name == "Val_LR_BT_No_A1c" \
or self.run_name == "Val_LR_BP_and_HR" \
or self.run_name == "Val_LR_Blood_Tests" \
or self.run_name == "Val_LR_Anthropometry" \
or self.run_name == "Val_LR_All_No_gen":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Explore_Singles_LR/"
if self.run_name == "Val_LR_All_No_gen":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All.csv"
else:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/" \
+ self.run_name[7:] + ".csv"
self.model_type = "LR"
self.mode="explore"
self.batch_size = 50
elif self.run_name == "Val_LR_Socio_demographics_LL_UB" \
or self.run_name == "Val_LR_Age_and_Sex_LL_UB" \
or self.run_name == "Val_LR_Physical_health_LL_UB" \
or self.run_name == "Val_LR_Mental_health_LL_UB" \
or self.run_name == "Val_LR_Medication_LL_UB" \
or self.run_name == "Val_LR_Lifestyle_and_physical_activity_LL_UB" \
or self.run_name == "Val_LR_HbA1c_LL_UB" \
or self.run_name == "Val_LR_Family_and_Ethnicity_LL_UB" \
or self.run_name == "Val_LR_Early_Life_Factors_LL_UB" \
or self.run_name == "Val_LR_Diet_LL_UB" \
or self.run_name == "Val_LR_BT_No_A1c_No_Gluc_LL_UB" \
or self.run_name == "Val_LR_BT_No_A1c_LL_UB" \
or self.run_name == "Val_LR_BP_and_HR_LL_UB" \
or self.run_name == "Val_LR_Blood_Tests" \
or self.run_name == "Val_LR_Anthropometry_LL_UB" \
or self.run_name == "Val_LR_All_No_gen_LL_UB"\
or self.run_name == "Val_LR_Blood_Tests_LL_UB"\
or self.run_name == "Val_LR_Anthropometry_LL_UB" \
or self.run_name == "Val_LR_Five_Blood_Tests_LL_UB" \
or self.run_name == "Val_LR_All_No_gen_LL_UB":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Explore/LL_UB"
if "All" in self.run_name:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All.csv"
else:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/"\
+self.run_name[7:-6]+".csv"
self.model_type = "LR"
self.batch_size = 50
self.scorer=make_scorer(log_loss, needs_proba=True,greater_is_better=False)
self.class_weight = None
self.mode="explore"
elif self.run_name == "Val_LR_Blood_Tests_LL_UB" \
or self.run_name == "Val_LR_Five_Blood_Tests_LL_UB" :
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Explore/LL_UB"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/" \
+ self.run_name[7:-6] + ".csv"
self.model_type = "LR"
self.mode="explore"
self.batch_size = 50
elif self.run_name == "Val_LR_Blood_Tests_LL_UB" \
or self.run_name == "Val_LR_Five_Blood_Tests_LL_UB" :
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Explore/LL_UB"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/" \
+ self.run_name[7:-6] + ".csv"
self.model_type = "LR"
self.mode="explore"
self.batch_size = 50
elif self.run_name == "LR_Socio_demographics"\
or self.run_name == "LR_Age_and_Sex"\
or self.run_name == "LR_Physical_health"\
or self.run_name == "LR_Mental_health"\
or self.run_name == "LR_Medication"\
or self.run_name == "LR_Lifestyle_and_physical_activity"\
or self.run_name == "LR_HbA1c"\
or self.run_name == "LR_Family_and_Ethnicity"\
or self.run_name == "LR_Early_Life_Factors"\
or self.run_name == "LR_Diet"\
or self.run_name == "LR_BT_No_A1c_No_Gluc"\
or self.run_name == "LR_BT_No_A1c"\
or self.run_name == "LR_BP_and_HR"\
or self.run_name == "LR_Blood_Tests"\
or self.run_name == "LR_Anthropometry"\
or self.run_name == "LR_Antro_neto_whr"\
or self.run_name == "LR_Five_Blood_Tests"\
or self.run_name == "LR_All_No_gen":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_Singles_LR/"
if self.run_name == "LR_All_No_gen":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All.csv"
else:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/"\
+self.run_name[3:]+".csv"
self.model_type = "LR"
self.batch_size = 50
elif self.run_name == "LR_Blood_Tests_brier"\
or self.run_name == "LR_Anthropometry_brier" \
or self.run_name == "LR_Five_Blood_Tests_brier" \
or self.run_name == "LR_All_No_gen_brier":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Singles_brier_LR/"
if self.run_name == "LR_All_No_gen_brier":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All.csv"
else:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/"\
+self.run_name[3:-6]+".csv"
self.model_type = "LR"
self.batch_size = 50
self.scorer=make_scorer(brier_score_loss, needs_proba=True,greater_is_better=False)
self.class_weight = "balanced"
elif self.run_name == "LR_Blood_Tests_brier_UB"\
or self.run_name == "LR_Anthropometry_brier_UB" \
or self.run_name == "LR_Five_Blood_Tests_brier_UB" \
or self.run_name == "LR_All_No_gen_brier_UB":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Singles_brier_UB_LR/"
if self.run_name == "LR_All_No_gen_brier_UB":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All.csv"
else:
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/"\
+self.run_name[3:-9]+".csv"
self.model_type = "LR"
self.batch_size = 50
self.scorer=make_scorer(brier_score_loss, needs_proba=True,greater_is_better=False)
self.class_weight = None
elif self.run_name == "Only_genetics" \
or self.run_name == "All_No_A1c_No_Gluc" \
or self.run_name == "Genetics_Age_and_Sex":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Singles/"
self.model_type = "gbdt"
self.batch_size = 4
elif self.run_name == "Val_Only_genetics" \
or self.run_name == "Val_All_No_A1c_No_Gluc" \
or self.run_name == "Val_Genetics_Age_and_Sex":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Val_Singles/"
self.model_type = "gbdt"
self.batch_size = 4
self.mode="explore"
elif self.run_name == "Val_Age_and_Sex" \
or self.run_name == "Val_BP_and_HR" \
or self.run_name == "Val_Socio_demographics" \
or self.run_name == "Val_Family_and_Ethnicity" \
or self.run_name == "Val_Physical_health" \
or self.run_name == "Val_Mental_health" \
or self.run_name == "Val_Medication" \
or self.run_name == "Val_Lifestyle_and_physical_activity" \
or self.run_name == "Val_HbA1c" \
or self.run_name == "Val_Family_and_Ethnicity" \
or self.run_name == "Val_Early_Life_Factors" \
or self.run_name == "Val_BT_No_A1c_No_Gluc" \
or self.run_name == "Val_BT_No_A1c" \
or self.run_name == "Val_Blood_Tests" \
or self.run_name == "Val_Anthropometry"\
or self.run_name == "Val_Diet" \
or self.run_name == "Val_All_No_gen":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Val_Singles/"
self.model_type = "gbdt"
self.batch_size=10
self.mode = "explore"
elif self.run_name == "Val_Non_Lab"\
or self.run_name == "Val_Non_Lab_min"\
or self.run_name == "Val_Non_Lab_no_diet" \
or self.run_name =="Val_Anthro_based_min" \
or self.run_name == "Val_Antro_whr_family":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Val_Combos/"
self.model_type = "gbdt"
self.batch_size=25
self.mode = "explore"
elif self.run_name == "Val_Ten_Blood_Tests"\
or self.run_name == "Val_Six_Blood_Tests"\
or self.run_name == "Val_Five_Blood_Tests" \
or self.run_name == "Val_Four_Blood_Tests" \
or self.run_name =="Val_HbA1c"\
or self.run_name =="Val_Three_wo_Triglycerides_Blood_Tests"\
or self.run_name =="Val_Three_wo_reticulocytes_Blood_Tests" \
or self.run_name =="Val_Two_Blood_Tests":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Val_BT/"
self.model_type = "gbdt"
self.batch_size=25
self.mode = "explore"
elif self.run_name == "A12_Socio_Genetics_explore"\
or self.run_name == "All_explore":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/explore_val/"
self.model_type = "gbdt"
self.batch_size = 2
self.mode = "explore"
elif self.run_name == "Age_and_Sex" \
or self.run_name == "BP_and_HR" \
or self.run_name == "Socio_demographics" \
or self.run_name == "Family_and_Ethnicity" \
or self.run_name == "Physical_health" \
or self.run_name == "Mental_health" \
or self.run_name == "Medication" \
or self.run_name == "Lifestyle_and_physical_activity" \
or self.run_name == "HbA1c" \
or self.run_name == "Family_and_Ethnicity" \
or self.run_name == "Early_Life_Factors" \
or self.run_name == "BT_No_A1c_No_Gluc" \
or self.run_name == "BT_No_A1c" \
or self.run_name == "Blood_Tests" \
or self.run_name == "Five_Blood_Tests" \
or self.run_name == "Anthropometry"\
or self.run_name == "Diet":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Singles/"
self.model_type = "gbdt"
self.batch_size=25
elif self.run_name == "A12_Socio_Genetics"\
or self.run_name == "All":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_Addings/"
self.model_type = "gbdt"
self.batch_size = 2
elif self.run_name == "A1_BT__Anthro_explore"\
or self.run_name == "A2_Anthro__Physical_Health_explore"\
or self.run_name == "A3_Physical_Health__Lifestyle_explore"\
or self.run_name == "A4_Lifestyle__BP_n_HR_explore"\
or self.run_name == "A5l_BP_n_HR__ND_Diagnosis_explore"\
or self.run_name == "A6_ND_Diagnosis__Mental_explore"\
or self.run_name == "A7_Mental__Medication_explore"\
or self.run_name == "A8_Medication__Diet_explore"\
or self.run_name == "A9_Diet__Family_explore"\
or self.run_name == "A10_Family__ELF_explore"\
or self.run_name == "A11_ELF__Socio_explore"\
or self.run_name == "All_No_A1c_No_Gluc_explore":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Val_Addings/"
self.model_type = "gbdt"
self.batch_size = 10
self.hyper_parameter_iter = 100
self.mode="explore"
elif self.run_name == "A1_BT__Anthro"\
or self.run_name == "A2_Anthro__Physical_Health"\
or self.run_name == "A3_Physical_Health__Lifestyle"\
or self.run_name == "A4_Lifestyle__BP_n_HR"\
or self.run_name == "A5_BP_n_HR__ND_Diagnosis"\
or self.run_name == "A6_ND_Diagnosis__Mental"\
or self.run_name == "A7_Mental__Medication"\
or self.run_name == "A8_Medication__Diet"\
or self.run_name == "A9_Diet__Family"\
or self.run_name == "A10_Family__ELF"\
or self.run_name == "A11_ELF__Socio"\
or self.run_name == "All_No_A1c_No_Gluc":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Addings/"
self.model_type = "gbdt"
self.batch_size = 10
elif self.run_name == "LR_Antro_whr_family"\
or self.run_name == "LR_Finrisc"\
or self.run_name == "LR_Finrisc_w_TTV"\
or self.run_name == "LR_Antro_scoreboard":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Baseline_compare/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/" \
+self.run_name[3:]+".csv"
self.model_type = "LR"
self.batch_size = 50
elif self.run_name == "LR_Antro_scoreboard":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Scoreboard/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/" \
+self.run_name[3:]+".csv"
self.model_type = "LR"
self.batch_size = 50
elif self.run_name == "Strat_L39_Antro_neto_whr"\
or self.run_name == "Strat_L39_A11_minimal"\
or self.run_name == "Strat_L39_All":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.model_type = "gbdt"
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
self.batch_size = 50
elif self.run_name == "Strat_L20_H39_Antro_neto_whr"\
or self.run_name == "Strat_L20_H39_All":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.model_type = "gbdt"
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 20, "Maximal_a1c": 39}
self.batch_size = 50
elif self.run_name == "LR_All_No_A1c_No_Gluc"\
or self.run_name == "LR_BT_No_A1c":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Addings/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/"\
+self.run_name[3:]+".csv"
self.model_type = "LR"
self.compute_CI = True
self.batch_size=20
elif self.run_name == "LR_Anthropometry_NO_whr":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Baseline_compare/"
self.features_file_path = \
"/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Anthropometry.csv"
self.model_type = "LR"
self.batch_size=50
elif self.run_name == "LR_Strat_L39_Antro_neto_whr":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/A1c_strat"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Antro_neto_whr.csv"
self.model_type = "LR"
self.batch_size=50
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
elif self.run_name == "LR_Strat_L20_H39_Antro_neto_whr":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/A1c_strat"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Antro_neto_whr.csv"
self.model_type = "LR"
self.batch_size=50
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 20, "Maximal_a1c": 39}
elif self.run_name == "LR_Strat_L39_Blood_tests":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Blood_Tests.csv"
self.model_type = "LR"
self.batch_size=20
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
elif self.run_name == "LR_Strat_L20_H39_Blood_tests":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Blood_Tests.csv"
self.model_type = "LR"
self.batch_size=20
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 20, "Maximal_a1c": 39}
elif self.run_name == "LR_Strat_L39_Five_Blood_Tests":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Five_Blood_Tests.csv"
self.model_type = "LR"
self.batch_size = 50
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
elif self.run_name == "LR_Strat_L20_H39_Five_Blood_Tests":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Five_Blood_Tests.csv"
self.model_type = "LR"
self.batch_size = 50
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 20, "Maximal_a1c": 39}
elif self.run_name == "LR_Strat_L39_Finrisc":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Finrisc.csv"
self.model_type = "LR"
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
self.batch_size=50
elif self.run_name == "LR_Strat_L20_H39_Finrisc":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/Finrisc.csv"
self.model_type = "LR"
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 20, "Maximal_a1c": 39}
self.batch_size=50
elif self.run_name == "Age_strat":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Age_Strat/"
self.model_type = "gbdt"
elif self.run_name == "LR_A1c_Strat_low_All_No_A1c_No_Gluc":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All_No_A1c_No_Gluc.csv"
self.model_type = "LR"
self.charac_selected={"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All","Minimal_a1c":20,"Maximal_a1c":39}
elif self.run_name == "LR_A1c_Strat_high_All_No_A1c_No_Gluc":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_A1c_strat/"
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/All_No_A1c_No_Gluc.csv"
self.model_type = "LR"
self.charac_selected = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All", "Minimal_a1c": 39}
self.batch_size=50
elif self.run_name=="SA_GDRS":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/GDRS.csv"
self.model_type = "SA"
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_Baseline_compare/"
self.compute_CI=True
elif self.run_name=="SA_Strat_L39_GDRS":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/GDRS.csv"
self.model_type="SA"
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_A1c_strat/"
self.compute_CI=True
self.charac_selected={"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All","Minimal_a1c":39}
elif self.run_name=="SA_Strat_L20_H39_GDRS":
self.features_file_path = "/home/edlitzy/Biobank/Diabetes_Features_lists/For_article/GDRS.csv"
self.model_type="SA"
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Imputed_screened/New_A1c_strat/"
self.compute_CI=True
self.charac_selected={"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All","Maximal_a1c":39,"Minimal_a1c":20}
elif self.run_name == "Anthro_based_min"\
or self.run_name == "Antro_neto_whr"\
or self.run_name == "Antro_whr_family":
self.Folder_path = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/New_Baseline_compare/"
self.model_type = "gbdt"
else:
sys.exit("Model:",self.run_name, " was not found in CI_configs")
def Set_GBDT(self,job_name="Diabetes"):
self.best_run = str(pd.read_csv(os.path.join(self.final_folder,self.job_name +
"_result_sorted.csv"), index_col=0).index.values[0])
with open(os.path.join(self.final_folder,"Rel_Feat_Names"), 'rb') as fp:
self.Rel_Feat_Names = pickle.load(fp)
with open(os.path.join(self.final_folder,"cat_names"), 'rb') as fp:
self.cat_names = pickle.load(fp)
self.parameters = pd.read_csv(os.path.join(self.final_folder,self.job_name + "_Parameters_Table.csv"),
index_col=0) # Check that we can read params and build the selected model, train it and make all required drawings
self.parameters = self.parameters.loc[['SN', 'boost_from_average', 'boosting_type', 'colsample_bytree',
'is_unbalance', 'lambda_l1', 'lambda_l2', 'learning_rate', 'metric',
'min_child_samples', 'num_boost_round', 'num_threads', 'objective',
'subsample', 'verbose'], :]
self.parameters.columns = self.parameters.loc["SN", :]
self.parameters.drop(index="SN", inplace=True)
self.params_dict = self.parameters.loc[:, self.best_run].to_dict()
self.cat_ind = [x for x, name in enumerate(self.Rel_Feat_Names) if name in self.cat_names]
self.params_bu = self.params_dict
self.CI_load_data()
def CI_load_data(self):
data_path = self.final_folder
with open(os.path.join(data_path, "Diabetestrain_Data"), 'rb') as fp:
Train_Data = pickle.load(fp)
with open(os.path.join(data_path, "Diabetestest_Data"), 'rb') as fp:
Test_Data = pickle.load(fp)
self.X_train = Train_Data["df_Features"]
self.y_train = Train_Data["DF_Targets"]
self.X_val = Test_Data["df_Features"]
self.y_val = Test_Data["DF_Targets"]
if self.new_features_file_path_for_testing!=None:
self.choose_new_GBDT_test_data()
# return self.X_train, self.y_train, self.X_val, self.y_val
def choose_new_GBDT_test_data(self):
        new_features_path = self.new_features_file_path_for_testing
        all_data = pd.read_csv(self.Val_file_path, index_col="eid", usecols=["eid", "30750-0.0"])
        # Fall back to all rows when no A1c bounds are configured.
        use_index_df = all_data
        try:
            use_index_df = all_data.loc[all_data["30750-0.0"] > self.minimal_a1c]
        except AttributeError:
            print("self.minimal_a1c is not defined")
        try:
            use_index_df = all_data.loc[all_data["30750-0.0"] > self.minimal_a1c] \
                .loc[all_data["30750-0.0"] < self.maximal_a1c]
        except AttributeError:
            print("self.maximal_a1c is not defined")
        use_index = use_index_df.index
        self.FEAT_DF = pd.read_csv(new_features_path)  # Read Features file
        self.FEAT_DF = self.FEAT_DF[self.FEAT_DF["Exclude"] != 1]
        Use_Columns = [x for x in self.FEAT_DF["Field ID"]]
        self.X_val = self.X_val.loc[use_index, Use_Columns]
        # The target frame only carries labels, so restrict it by row index alone.
        self.y_val = self.y_val.loc[use_index]
    def create_dir(self):
        # Create the output folders if they do not exist yet.
        for path in (self.Folder_path, self.Training_path, self.CI_results_path):
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except OSError:
                    print("Couldn't create " + path)
def calc_CI(self):
if self.model_type=="SA" or self.model_type=="LR":
            results_frame_list = [pd.read_csv(os.path.join(self.CI_results_path, f))
                                  for f in os.listdir(self.CI_results_path)
                                  if os.path.isfile(os.path.join(self.CI_results_path, f))
                                  and f.startswith("AUC_APS_results_")]
results_df=pd.concat(results_frame_list)
aucroc_list=list(results_df.loc[:,"AUC"].values)
aps_list=list(results_df.loc[:,"APS"].values)
self.AUROC_lower,self.AUROC_upper=self.calc_CI_percentile(aucroc_list)
self.APS_lower,self.APS_upper=self.calc_CI_percentile(aps_list)
self.APS_median=np.median(np.array(aps_list))
self.APS_mean = np.mean(np.array(aps_list))
self.AUROC_median = np.median(np.array(aucroc_list))
self.AUROC_mean = np.mean(np.array(aucroc_list))
CI_Results_DF = pd.DataFrame.from_dict({"AUROC_mean": [self.AUROC_mean], "AUROC_median": [self.AUROC_median],
"AUROC_upper": [self.AUROC_upper], "AUROC_lower": [self.AUROC_lower],
"APS_mean": [self.APS_mean], "APS_median": [self.APS_median],
"APS_upper": [self.APS_upper], "APS_lower": [self.APS_lower]})
CI_Results_DF.index = [self.run_name]
CI_Results_DF.to_csv(self.CI_results_summary_table)
print(("Results of",self.run_name,"saved to: ",self.CI_results_summary_table))
print(("Results are: ",CI_Results_DF))
elif self.model_type=="gbdt":
aucroc_list = []
aps_list = []
onlyfiles = [f for f in os.listdir(self.CI_results_path) if
(os.path.isfile(os.path.join(self.CI_results_path, f)) and f.startswith("CI_Dict"))]
for f in onlyfiles:
with open(os.path.join(self.CI_results_path, f), 'rb') as fp:
self.data_dict = pickle.load(fp)
aucroc_list.append(self.data_dict["AUROC"])
aps_list.append(self.data_dict["APS"])
self.AUROC_lower, self.AUROC_upper = self.calc_CI_percentile(aucroc_list)
self.APS_lower, self.APS_upper = self.calc_CI_percentile(aps_list)
self.APS_median = np.median(np.array(aps_list))
self.APS_mean = np.mean(np.array(aps_list))
self.AUROC_median = np.median(np.array(aucroc_list))
self.AUROC_mean = np.mean(np.array(aucroc_list))
CI_Results_DF = pd.DataFrame.from_dict(
{"AUROC_mean": [self.AUROC_mean], "AUROC_median": [self.AUROC_median],
"AUROC_upper": [self.AUROC_upper], "AUROC_lower": [self.AUROC_lower],
"APS_mean": [self.APS_mean], "APS_median": [self.APS_median],
"APS_upper": [self.APS_upper], "APS_lower": [self.APS_lower]})
CI_Results_DF.index = [self.run_name]
results_path=os.path.join(self.CI_results_path, "CI_results.csv")
CI_Results_DF.to_csv(results_path)
print(("CI_Results_DF saved to:",results_path))
print(("CI_Results_DF are:",CI_Results_DF))
def calc_CI_percentile(self,metric_list,alpha = 0.95):
p = ((1.0 - alpha) / 2.0) * 100
lower = max(0.0, np.percentile(metric_list, p))
p = (alpha + ((1.0 - alpha) / 2.0)) * 100
upper = min(1.0, np.percentile(metric_list, p))
print(('%.1f confidence interval %.1f%% and %.1f%%' % (alpha * 100, lower * 100, upper * 100)))
return lower, upper
    def check_exist_CI_files(self):
        # Record which bootstrap result files already exist so finished iterations are not redone.
        exist_files = os.listdir(self.CI_results_path)
        try:
            self.exist_CI_files = [x.split("_")[-1] for x in exist_files]
        except Exception:
            self.exist_CI_files = []
        try:
            self.missing_files = [str(x) for x in np.arange(self.num_of_bootstraps)
                                  if str(x) not in self.exist_CI_files]
            # print(".missing_files=", self.missing_files)
        except Exception:
            self.missing_files = []
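# A minimal standalone sketch of the percentile bootstrap confidence interval used by
# calc_CI_percentile above. It assumes `metric_list` holds one AUROC or APS value per
# bootstrap resample; shown for illustration only and not wired into the class.
def percentile_bootstrap_ci(metric_list, alpha=0.95):
    lower_p = ((1.0 - alpha) / 2.0) * 100           # e.g. 2.5 for alpha=0.95
    upper_p = (alpha + (1.0 - alpha) / 2.0) * 100   # e.g. 97.5 for alpha=0.95
    lower = max(0.0, np.percentile(metric_list, lower_p))
    upper = min(1.0, np.percentile(metric_list, upper_p))
    return lower, upper
# Example: percentile_bootstrap_ci([0.81, 0.83, 0.84, 0.82]) -> roughly (0.811, 0.839)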
# class RunParams:
# #For GBDT runs
# def __init__(self,model_path_name=[],BASIC_PROB_BASED_JOB_NAME=[],num_of_bootstraps=1000):
# print("Fetching params for predicitng", model_path_name," CI")
# self.path= model_path_name+"_Diabetes"
# self.CI_results_path = os.path.join(model_path_name, "Diabetes_Results", "CI")
# self.job_name = "Diabetes"
# self.RUN_NAME = self.path.split("/")[-2]
# print("RUN_NAME:", self.RUN_NAME)
# self.num_of_bootstraps=num_of_bootstraps
# self.VERBOSE_EVAL = 1000
# if BASIC_PROB_BASED_JOB_NAME==[]:
# self.BASIC_PROB_BASED_JOB_NAME = "_".join((self.path.split("/")[-1]).split("_")[:-1])
# else:
# self.BASIC_PROB_BASED_JOB_NAME=BASIC_PROB_BASED_JOB_NAME
# print("BASIC_PROB_BASED_JOB_NAME:", self.BASIC_PROB_BASED_JOB_NAME)
# self.SAVE_FOLDER = "/net/mraid08/export/jafar/Yochai/UKBB_Runs/For_article/Minimal/" + self.RUN_NAME + "/"
# self.Save_2_folder = self.SAVE_FOLDER + self.BASIC_PROB_BASED_JOB_NAME + "_" + self.job_name + "/"
# self.final_folder = self.Save_2_folder + self.job_name + "_Results/"
# self.run_name = str(pd.read_csv(self.final_folder + self.job_name + "_result_sorted.csv", index_col=0).index.values[0])
#
# with open(self.final_folder + "Rel_Feat_Names", 'rb') as fp:
# self.Rel_Feat_Names = pickle.load(fp)
# with open(self.final_folder + "cat_names", 'rb') as fp:
# self.cat_names = pickle.load(fp)
#
# self.parameters = pd.read_csv(self.final_folder + self.job_name + "_Parameters_Table.csv",
# index_col=0) # Check that we can read params and build the selected model, train it and make all required drawings
# self.parameters =self.parameters.loc[['SN','boost_from_average', 'boosting_type', 'colsample_bytree',
# 'is_unbalance', 'lambda_l1', 'lambda_l2', 'learning_rate','metric',
# 'min_child_samples', u'num_boost_round', u'num_threads','objective',
# 'subsample', 'verbose'],:]
# self.parameters.columns = self.parameters.loc["SN", :]
# self.parameters.drop(index="SN", inplace=True)
# self.params_dict = self.parameters.loc[:,self.run_name].to_dict()
#
# self.cat_ind = [x for x, name in enumerate(self.Rel_Feat_Names) if name in self.cat_names]
# self.params_bu = self.params_dict
# self.create_dir()
# self.CI_load_data()
#
#
# def CI_load_data(self):
# """path should be equal to path"""
# data_path = os.path.join(self.path, "Diabetes_Results")
#
# with open(os.path.join(data_path, "Diabetestrain_Data"), 'rb') as fp:
# Train_Data = pickle.load(fp)
# with open(os.path.join(data_path, "Diabetestest_Data"), 'rb') as fp:
# Test_Data = pickle.load(fp)
# self.X_train = Train_Data["df_Features"]
# self.y_train = Train_Data["DF_Targets"]
# self.X_val = Test_Data["df_Features"]
# self.y_val = Test_Data["DF_Targets"]
# return self.X_train, self.y_train, self.X_val, self.y_val
#
# def calc_CI(self):
# aucroc_list=[]
# aps_list=[]
# onlyfiles = [f for f in os.listdir(self.CI_results_path) if (os.path.isfile(os.path.join(self.CI_results_path, f)) and f.startswith("CI_Dict"))]
# for f in onlyfiles:
# with open(os.path.join(self.CI_results_path, f), 'rb') as fp:
# self.data_dict=pickle.load(fp)
# aucroc_list.append(self.data_dict["AUROC"])
# aps_list.append(self.data_dict["APS"])
# self.AUROC_lower,self.AUROC_upper=self.calc_CI_percentile(aucroc_list)
# self.APS_lower,self.APS_upper=self.calc_CI_percentile(aps_list)
# self.APS_median=np.median(np.array(aps_list))
# self.APS_mean = np.mean(np.array(aps_list))
# self.AUROC_median = np.median(np.array(aucroc_list))
# self.AUROC_mean = np.mean(np.array(aucroc_list))
# CI_Results_DF = pd.DataFrame.from_dict({"AUROC_mean": [self.AUROC_mean], "AUROC_median": [self.AUROC_median],
# "AUROC_upper": [self.AUROC_upper], "AUROC_lower": [self.AUROC_lower],
# "APS_mean": [self.APS_mean], "APS_median": [self.APS_median],
# "APS_upper": [self.APS_upper], "APS_lower": [self.APS_lower]})
# CI_Results_DF.index = [self.BASIC_PROB_BASED_JOB_NAME]
# CI_Results_DF.to_csv(os.path.join(self.CI_results_path,"CI_results.csv"))
#
# def calc_CI_percentile(self,metric_list,alpha = 0.95):
# p = ((1.0 - alpha) / 2.0) * 100
# lower = max(0.0, np.percentile(metric_list, p))
# p = (alpha + ((1.0 - alpha) / 2.0)) * 100
# upper = min(1.0, np.percentile(metric_list, p))
# print('%.1f confidence interval %.1f%% and %.1f%%' % (alpha * 100, lower * 100, upper * 100))
# return lower, upper
#
# def create_dir(self):
# if not os.path.exists(self.CI_results_path):
# os.makedirs(self.CI_results_path)
| 58.258367
| 155
| 0.609412
|
5f9a3baf53c116c9f7efd46e0960fb72f637a434
| 636
|
py
|
Python
|
EventFilter/CSCRawToDigi/python/cscSQLiteCablingPack_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
EventFilter/CSCRawToDigi/python/cscSQLiteCablingPack_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
EventFilter/CSCRawToDigi/python/cscSQLiteCablingPack_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
# different es_sources are used for different purposes - packing and unpacking
# this one is for packing
cscPackingCabling = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb'),
authenticationMethod = cms.untracked.uint32(1)
),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('CSCChamberMapRcd'),
tag = cms.string('CSCChamberMap')
)),
connect = cms.string('sqlite_fip:CondCore/SQLiteData/data/CSCChamberMapValues_18X.db')
)
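# Usage sketch (assumption: standard CMSSW configuration conventions; not part of the original file).
# A cmsRun configuration would typically pull this ESSource in with
#     process.load("EventFilter.CSCRawToDigi.cscSQLiteCablingPack_cff")
# making the CSCChamberMap payload defined above available to the CSC packer.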
| 33.473684
| 90
| 0.707547
|
fe141af70c30876b071d0982117206cc03d63d6e
| 4,000
|
py
|
Python
|
src/bluetooth/bluetooth_streamer.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/bluetooth/bluetooth_streamer.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/bluetooth/bluetooth_streamer.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import signal
import time
import json
from serial import Serial
import Pyro4
import pytz
from foundation import *
"""
THE PURPOSE OF THIS CODE IS TO PROVIDE A STREAM OF DATA FROM THE SERIAL TERMINALS
TO THE PROGRAMMER'S CONSOLE TO SEE WHAT THE ARDUINO DEVICE IS RETURNING. THIS
CODE IS TO BE USED FOR TESTING PURPOSES ONLY! **DO NOT USE IN PRODUCTION**
"""
class ServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
def onServiceShutdownHandler(signum, frame):
"""
Function to be called by our `SIGINT` and `SIGTERM` handlers.
"""
print("-------------------------------------------------------------------")
print(getDT(), '| SERIAL TERMINAL STREAM | Caught signal %d' % signum)
print("-------------------------------------------------------------------")
raise ServiceExit
class BluetoothSerialTerminalPrint(object):
"""
Service interacts with the external device (Arduino) and prints the data
on a specific interval to the user's console.
"""
def __init__(self):
'''
        Wait until our computer can connect to the external device (Arduino)
over serial USB communication to begin running our program.
'''
try:
self.__serial = Serial(SERIAL_PORT, SERIAL_BAUD, timeout=SERIAL_TIMEOUT)
time.sleep(2) # Wait for serial terminal to setup.
print(getDT(), "| SERIAL TERMINAL STREAM | Successfully connected to serial port:", SERIAL_PORT);
except Exception as e:
if "could not open port" in str(e):
print(getDT(), "| SERIAL TERMINAL STREAM | Could not connect to serial port:", SERIAL_PORT);
exit()
'''
Load up our application variables.
'''
self.__storage = Pyro4.Proxy("PYRONAME:mikapod.storage")
def runOnMainLoop(self):
"""
Function is the main loop of the application.
"""
print(getDT(), "| SERIAL TERMINAL STREAM | Register the signal handlers.")
signal.signal(signal.SIGTERM, onServiceShutdownHandler)
signal.signal(signal.SIGINT, onServiceShutdownHandler)
print(getDT(), "| SERIAL TERMINAL STREAM | Starting main program.")
try:
self.runOperationLoop()
except ServiceExit:
print(getDT(), "| SERIAL TERMINAL STREAM | Gracefully shutting down.")
print(getDT(), "| SERIAL TERMINAL STREAM | Exiting main program.")
def runOperationLoop(self):
        # Keep running the main runtime loop: read a line from the serial
        # port, parse the comma-separated fields, and dispatch any
        # recognized command.
while True:
byte_data = self.__serial.readline()
string_data = byte_data.decode('UTF-8') # NOTE: https://stackoverflow.com/questions/6269765/what-does-the-b-character-do-in-front-of-a-string-literal#6273618
# Check to see if ANY data was returned from the serial port, if
# there was then we load up the string
if len(string_data) > 0:
array_data = [x.strip() for x in string_data.split(',')]
print(getDT(), "| SERIAL TERMINAL STREAM | Output - Pre:"+string_data+"\n")
print(getDT(), "| SERIAL TERMINAL STREAM | Output - Post:"+str(array_data)+"\n")
commandID = int(array_data[0])
var = array_data[1]
opcode = array_data[2]
if int(commandID) == SET_WIFI_COMMAND_ID:
self.changeWiFiCommand(var, opcode)
def changeWiFiCommand(self, ssid, pw):
print(getDT(), "| SERIAL TERMINAL STREAM | Set Wifi w/ SSID `"+ssid+"` and PW `"+pw+"`.\n")
if __name__ == "__main__":
"""
Main entry into the main program.
"""
app = BluetoothSerialTerminalPrint()
app.runOnMainLoop()
| 35.714286
| 169
| 0.60925
|
ac347cb64087688cbca4c6597259862b2fb029db
| 421
|
py
|
Python
|
Mac/Lib/test/tsnd.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | 3
|
2022-01-30T20:08:24.000Z
|
2022-02-12T08:51:12.000Z
|
Mac/Lib/test/tsnd.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
Mac/Lib/test/tsnd.py
|
1byte2bytes/cpython
|
7fbaeb819ca7b20dca048217ff585ec195e999ec
|
[
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null |
# Show off SndPlay (and some resource manager functions).
# Get a list of all 'snd ' resources in the system and play them all.
from Res import *
from Snd import *
ch = SndNewChannel(0, 0, None)
print "Channel:", ch
type = 'snd '
for i in range(CountResources(type)):
r = GetIndResource(type, i+1)
print r.GetResInfo(), r.size
if r.GetResInfo()[0] == 1:
print "Skipping simple beep"
continue
ch.SndPlay(r, 0)
| 22.157895
| 69
| 0.691211
|
f4bbb190ec710faea32a2184c66815e0f4ba9a06
| 2,074
|
py
|
Python
|
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_horovod.py
|
arjkesh/deep-learning-containers-1
|
9bf65197dee8a6f59b3d4ee240dcc11824240854
|
[
"Apache-2.0"
] | 1
|
2021-12-17T15:50:48.000Z
|
2021-12-17T15:50:48.000Z
|
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_horovod.py
|
arjkesh/deep-learning-containers-1
|
9bf65197dee8a6f59b3d4ee240dcc11824240854
|
[
"Apache-2.0"
] | null | null | null |
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_horovod.py
|
arjkesh/deep-learning-containers-1
|
9bf65197dee8a6f59b3d4ee240dcc11824240854
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import sagemaker
from sagemaker.tensorflow import TensorFlow
from ...integration.utils import processor, py_version, unique_name_from_base # noqa: F401
RESOURCE_PATH = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
def test_distributed_training_horovod(sagemaker_session,
instance_type,
ecr_image,
tmpdir,
framework_version):
mpi_options = '-verbose -x orte_base_help_aggregate=0'
estimator = TensorFlow(
entry_point=os.path.join(RESOURCE_PATH, 'mnist', 'horovod_mnist.py'),
role='SageMakerRole',
train_instance_type=instance_type,
train_instance_count=2,
image_name=ecr_image,
framework_version=framework_version,
py_version='py3',
script_mode=True,
hyperparameters={'sagemaker_mpi_enabled': True,
'sagemaker_mpi_custom_mpi_options': mpi_options,
'sagemaker_mpi_num_of_processes_per_host': 1},
sagemaker_session=sagemaker_session)
estimator.fit(job_name=unique_name_from_base('test-tf-horovod'))
model_data_source = sagemaker.local.data.get_data_source_instance(
estimator.model_data, sagemaker_session)
for filename in model_data_source.get_file_list():
assert os.path.basename(filename) == 'model.tar.gz'
| 39.132075
| 91
| 0.677917
|
6a433c4979f41ba2eb37ceda0c23eab40e0647db
| 2,647
|
py
|
Python
|
travy/travy.py
|
ribal-aladeeb/Simple-Camera
|
a4d664b0878e8e86e126c45fb0df9f5aa62ce5b3
|
[
"Apache-2.0"
] | null | null | null |
travy/travy.py
|
ribal-aladeeb/Simple-Camera
|
a4d664b0878e8e86e126c45fb0df9f5aa62ce5b3
|
[
"Apache-2.0"
] | 2
|
2019-01-11T17:06:29.000Z
|
2019-01-19T04:05:56.000Z
|
travy/travy.py
|
YannCedric/Simple-Camera
|
a4d664b0878e8e86e126c45fb0df9f5aa62ce5b3
|
[
"Apache-2.0"
] | 2
|
2020-01-16T07:21:51.000Z
|
2020-03-19T20:41:48.000Z
|
import requests
import pprint
import json
import flask
from config import token
import re
# pretty prints json to terminal
dump = pprint.PrettyPrinter(indent=4).pprint
# dump obj to data.json
def file_dump(obj):
with open('data.json', 'w') as outfile:
json.dump(obj, outfile)
# make a get request to travis api
def travy_get(url):
headers = {
"Travis-API-Version": "3",
"User-Agent" : "User-Agent: API Explorer",
"Authorization": "token " + token
}
base = "https://api.travis-ci.com"
request_url = base+url
dump(request_url)
return requests.get(request_url, headers=headers).json()
def latest_build():
url = "/repo/7494371/builds?limit=1"
build = travy_get(url)['builds'][0] # get build of latest commit
return build
def get_build(build_no):
# ref to failing build
url = "/build/" + build_no # eg. 107300346
build = travy_get(url) # get specific build
return build
def analyse_build(build):
dump(build)
jobs = build['jobs'] # get the jobs of the build
logs = {"logs":{}, "commit":{}} # log object
for job in jobs:
full_job = travy_get(job['@href'])
stage = full_job['stage']
if stage['state'] == "failed":
stage_name = stage['name'] # lint, build, test
log = travy_get(job['@href']+"/log")['content'] # get log of the build
logs['logs'][stage_name] = log # Assign it the log
if stage_name == "Test":
logs['logs'][stage_name] = []
                ex = re.compile(r"KtUnitTests > \w* FAILED\r\n.+?(?=\r\n)")
x = ex.findall(log)
for match in x:
arr = match.split("\r\n")
print(arr)
dictionary = { "test":arr[0] , "reason":arr[1].strip(" ") }
logs['logs'][stage_name].append(dictionary)
elif stage['state'] == "canceled":
log = {"@type":'error', "error_message":"Stage cancelled so job was not run"}
logs['logs'][stage['name']] = log
elif stage['state'] == "created":
log = {"@type":'message', "message":"Stage created."}
logs['logs'][stage['name']] = log
else: # stage['state'] == "passed":
print(stage['state'])
log = {"@type":'message', "message":"Stage passed."}
logs['logs'][stage['name']] = log
logs['commit'] = build['commit']
logs['@href'] = build['@href']
logs['created_by'] = build['created_by']
file_dump(logs)
return logs
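# Minimal driver sketch (not in the original file) to exercise the helpers above; it assumes
# config.py supplies a valid Travis token and that the repository id hard-coded in
# latest_build() is reachable.
if __name__ == "__main__":
    build = latest_build()       # fetch the most recent build of the configured repo
    logs = analyse_build(build)  # collect failed-stage logs and write them to data.json
    dump(logs)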
| 34.376623
| 90
| 0.542501
|
07d26c80accf3bb9c903dd7c933451531d150963
| 1,310
|
py
|
Python
|
setup.py
|
PaintingInAir/Optimal
|
00b8572c82bc1a586d399ca7ba3f2131d14d426b
|
[
"MIT"
] | 36
|
2017-09-08T14:47:27.000Z
|
2022-03-31T02:12:31.000Z
|
setup.py
|
JustinLovinger/Optimal
|
00b8572c82bc1a586d399ca7ba3f2131d14d426b
|
[
"MIT"
] | 1
|
2020-03-03T04:29:57.000Z
|
2020-07-06T03:42:56.000Z
|
setup.py
|
JustinLovinger/optimal
|
00b8572c82bc1a586d399ca7ba3f2131d14d426b
|
[
"MIT"
] | 14
|
2017-10-03T12:45:20.000Z
|
2021-12-16T01:48:00.000Z
|
from distutils.core import setup
# Convert README.md to long description
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
long_description = long_description.replace("\r", "") # YOU NEED THIS LINE
except (ImportError, OSError, IOError):
print("Pandoc not found. Long_description conversion failure.")
import io
# pandoc is not installed, fallback to using raw contents
with io.open('README.md', encoding="utf-8") as f:
long_description = f.read()
setup(
name='optimal',
version='0.2.1',
packages=['optimal', 'optimal.algorithms'],
# Include example and test files
package_data={'optimal': ['examples/*.py', 'tests/*.py', 'tests/algorithms/*.py']},
# Include readme
data_files=[('', ['README.md'])],
# Dependencies
install_requires=[
'numpy'
],
# Metadata
author='Justin Lovinger',
license='MIT',
description="A python metaheuristic optimization library. Currently supports Genetic Algorithms, Gravitational Search, and Cross Entropy.",
long_description=long_description,
keywords=['optimization', 'metaheuristic', 'genetic algorithm', 'GA',
'gravitational search algorithm', 'GSA', 'cross entropy'],
url='https://github.com/JustinLovinger/optimal',
)
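# Note (general distutils/pip usage, not from the original file): with this setup.py a source
# distribution can be built via `python setup.py sdist`, or the package installed locally with
# `pip install .` from the repository root.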
| 34.473684
| 143
| 0.676336
|
537a75f1c0418fecdd9238c0b81727bf8a691fae
| 279
|
py
|
Python
|
ippon/point/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | null | null | null |
ippon/point/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | 13
|
2018-12-22T15:30:56.000Z
|
2022-03-12T00:22:31.000Z
|
ippon/point/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | 2
|
2019-06-01T11:28:23.000Z
|
2020-03-27T15:19:11.000Z
|
from rest_framework import serializers
import ippon.models.point
class PointSerializer(serializers.ModelSerializer):
class Meta:
model = ippon.models.point.Point
fields = (
'id',
'type',
'player',
'fight'
)
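# Usage sketch (illustrative only; assumes a saved ippon.models.point.Point instance `point`):
#     serializer = PointSerializer(point)
#     serializer.data  # -> {'id': ..., 'type': ..., 'player': ..., 'fight': ...}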
| 18.6
| 51
| 0.555556
|
f7f8ed932b7446f3626d5114e48e64208afa23ee
| 28,897
|
py
|
Python
|
src/bdbd/src/bdbd/analysis/motion/NewRaph10.py
|
rkent/BDBD
|
c5d391da84faf5607c443078781f8b4e1c017dd5
|
[
"MIT"
] | null | null | null |
src/bdbd/src/bdbd/analysis/motion/NewRaph10.py
|
rkent/BDBD
|
c5d391da84faf5607c443078781f8b4e1c017dd5
|
[
"MIT"
] | null | null | null |
src/bdbd/src/bdbd/analysis/motion/NewRaph10.py
|
rkent/BDBD
|
c5d391da84faf5607c443078781f8b4e1c017dd5
|
[
"MIT"
] | null | null | null |
# newton-raphson iteration of motion equations
import numpy as np
import rospy
import math
import time
from bdbd_common.utils import fstr, gstr
from bdbd_common.msg import LeftRights
from bdbd_common.geometry import lr_est, default_lr_model, D_TO_R
def estr(a):
return fstr(a, fmat='10.7g', n_per_line=10)
class NewRaph():
def __init__(self, n, dt
,lr_model=default_lr_model()
,start_pose=(0.0, 0.0, 0.0)
,start_twist=(0.0, 0.0, 0.0)
):
self.lr_model = lr_model
self.n = n
self.dt = dt
self.start_pose = start_pose
self.start_twist = start_twist
# prep constants for calculations
alr_model = np.array(self.lr_model)
self.bhes = (dt * alr_model[0], dt * alr_model[1], dt * alr_model[2])
(_, _, qhx) = self.bhes[0]
(_, _, qhy) = self.bhes[1]
(_, _, qho) = self.bhes[2]
#print('(bhxl, bhxr, qhx): ' + estr((bhxl, bhxr, qhx)))
#print('(bhyl, bhyr, qhy): ' + estr((bhyl, bhyr, qhy)))
#print('(bhol, bhor, qho): ' + estr((bhol, bhor, qho)))
(alphax, alphay, alphao) = 1.0 - np.array((qhx, qhy, qho))
#print('(alphax, alphay, alphao):' + estr((alphax, alphay, alphao)))
# alpha ** j
alphaxj = [1.0]
alphayj = [1.0]
alphaoj = [1.0]
betaj = [dt]
for i in range(1, n):
alphaxj.append(alphaxj[i-1] * alphax)
alphayj.append(alphayj[i-1] * alphay)
alphaoj.append(alphaoj[i-1] * alphao)
betaj.append(betaj[i-1] + dt * alphaoj[i])
self.alphaxj = np.array(alphaxj)
self.alphayj = np.array(alphayj)
self.alphaoj = np.array(alphaoj)
self.betaj = np.array(betaj)
#print('alphaxj:' + estr(self.alphaxj))
#print('alphayj:' + estr(self.alphayj))
#print('alphaoj:' + estr(self.alphaoj))
#print('betaj:' + estr(self.betaj))
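        # Note (inferred from how poses() consumes these arrays): the lr_model terms define a
        # first-order per-axis motor model, v[i] = alpha * v[i-1] + b_l*left[i] + b_r*right[i],
        # so the powers alpha**j and the cumulative beta[j] = dt * (alpha**0 + ... + alpha**j)
        # let the recurrence be unrolled as dot products instead of an explicit time loop.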
def poses(self, ls, rs,
details=False
):
als = np.asarray(ls)
ars = np.asarray(rs)
self.als = als
self.ars = ars
#print('als:' + estr(als))
(px0, py0, theta0) = self.start_pose
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
(vxw0, vyw0, omega0) = self.start_twist
n = self.n
dt = self.dt
alphaxj = self.alphaxj
alphayj = self.alphayj
alphaoj = self.alphaoj
# initial robot velocities
        vx0 = vxw0 * math.cos(theta0) + vyw0 * math.sin(theta0)
vy0 = -vxw0 * math.sin(theta0) + vyw0 * math.cos(theta0)
# twist
vxj = np.empty(n)
vyj = np.empty(n)
omegaj = np.empty(n)
vxj[0] = vx0
vyj[0] = vy0
omegaj[0] = omega0
bmotorxj = bhxl * als + bhxr * ars
bmotoryj = bhyl * als + bhyr * ars
bmotoroj = bhol * als + bhor * ars
for i in range(1, n):
vxj[i] = vx0 * alphaxj[i] + np.dot(alphaxj[i-1::-1], bmotorxj[1:i+1])
vyj[i] = vy0 * alphayj[i] + np.dot(alphayj[i-1::-1], bmotoryj[1:i+1])
omegaj[i] = omega0 * alphaoj[i] + np.dot(alphaoj[i-1::-1], bmotoroj[1:i+1])
if details:
print(estr({'alphaoj[n-2::-1]': alphaoj[n-2::-1]}))
print(estr({'bmotoroj[1:n]': bmotoroj[1:n]}))
# pose
pxj = np.empty(n)
pyj = np.empty(n)
thetaj = np.empty(n)
pxj[0] = px0
pyj[0] = py0
thetaj[0] = theta0
for i in range(1, n):
thetaj[i] = theta0 + omega0 * (self.betaj[i] - dt) \
+ np.dot(self.betaj[i-1::-1], bmotoroj[1:i+1])
# intermediate values as vectors
cosj = np.cos(thetaj)
sinj = np.sin(thetaj)
vxcj = vxj * cosj
vxsj = vxj * sinj
vycj = vyj * cosj
vysj = vyj * sinj
vxwj = vxcj - vysj
vywj = vxsj + vycj
pxj[1:] = px0 + dt * np.cumsum(vxwj[1:])
pyj[1:] = py0 + dt * np.cumsum(vywj[1:])
# intermediate results
self.cosj = cosj
self.sinj = sinj
self.vxcj = vxcj
self.vxsj = vxsj
self.vycj = vycj
self.vysj = vysj
self.vxwj = vxwj
self.vywj = vywj
self.vxj = vxj
self.vyj = vyj
self.omegaj = omegaj
self.pxj = pxj
self.pyj = pyj
self.thetaj = thetaj
return (pxj, pyj, thetaj, vxj, vyj, omegaj)
def gradients(self):
# gradients
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
n = self.n
dt = self.dt
alphaxj = self.alphaxj
alphayj = self.alphayj
betaj = self.betaj
cosj = self.cosj
sinj = self.sinj
vxcj = self.vxcj
vxsj = self.vxsj
vycj = self.vycj
vysj = self.vysj
dpxdl = np.zeros((n,n))
dpydl = np.zeros((n,n))
dpxdr = np.zeros((n,n))
dpydr = np.zeros((n,n))
for i in range(1, n):
# gradients
for k in range(1, i+1):
doto = np.dot((-vxsj[k:i+1] - vycj[k:i+1]), betaj[:i+1-k])
dotx = np.dot(cosj[k:i+1], alphaxj[:i+1-k])
doty = np.dot(-sinj[k:i+1], alphayj[:i+1-k])
dpxdl[i,k] = dt * (
+bhol * doto
+bhxl * dotx
+bhyl * doty
)
dpxdr[i,k] = dt * (
+bhor * doto
+bhxr * dotx
+bhyr * doty
)
#if i == 1 and k == 1:
# print(estr({'bhor': bhor, 'doto': doto, 'bhxr': bhxr, 'dotx': dotx,
# 'bhyr': bhyr, 'doty': doty}))
doto = np.dot((vxcj[k:i+1] - vysj[k:i+1]), betaj[:i+1-k])
dotx = np.dot(sinj[k:i+1], alphaxj[:i+1-k])
doty = np.dot(cosj[k:i+1], alphayj[:i+1-k])
dpydl[i,k] = dt * (
+bhol * doto
+bhxl * dotx
+bhyl * doty
)
dpydr[i,k] = dt * (
+bhor * doto
+bhxr * dotx
+bhyr * doty
)
self.dpxdl = dpxdl
self.dpydl = dpydl
self.dpxdr = dpxdr
self.dpydr = dpydr
return (dpxdl, dpxdr, dpydl, dpydr)
def seconds(self):
# second partial derivatives at final location
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
n = self.n
dt = self.dt
alphaxj = self.alphaxj
alphayj = self.alphayj
betaj = self.betaj
cosj = self.cosj
sinj = self.sinj
vxwj = self.vxwj
vywj = self.vywj
d2pxdldl = np.zeros((n, n))
d2pxdldr = np.zeros((n, n))
d2pxdrdr = np.zeros((n, n))
d2pydldl = np.zeros((n, n))
d2pydldr = np.zeros((n, n))
d2pydrdr = np.zeros((n, n))
# This could be vectorized, but instead I do it discretely to more closely
# match the C++ version which is what we will actually use.
for j in range(1, n):
vxwdt = vxwj[j] * dt
vywdt = vywj[j] * dt
sdt = sinj[j] * dt
cdt = cosj[j] * dt
for k in range(1, j + 1):
betaljk = betaj[j-k] * bhol
betarjk = betaj[j-k] * bhor
alphaxljk = alphaxj[j-k] * bhxl
alphaxrjk = alphaxj[j-k] * bhxr
alphayljk = alphayj[j-k] * bhyl
alphayrjk = alphayj[j-k] * bhyr
for m in range(1, j + 1):
betaljm = betaj[j-m] * bhol
betarjm = betaj[j-m] * bhor
alphaxljm = alphaxj[j-m] * bhxl
alphaxrjm = alphaxj[j-m] * bhxr
                    alphayljm = alphayj[j-m] * bhyl
                    alphayrjm = alphayj[j-m] * bhyr
sumxll = (
-vxwdt * betaljk * betaljm
+sdt * (-betaljk * alphaxljm -alphaxljk * betaljm)
+cdt * (-betaljk * alphayljm -alphayljk * betaljm)
)
sumxlr = (
-vxwdt * betaljk * betarjm
+sdt * (-betaljk * alphaxrjm -alphaxljk * betarjm)
+cdt * (-betaljk * alphayrjm -alphayljk * betarjm)
)
sumxrr = (
-vxwdt * betarjk * betarjm
+sdt * (-betarjk * alphaxrjm -alphaxrjk * betarjm)
+cdt * (-betarjk * alphayrjm -alphayrjk * betarjm)
)
sumyll = (
-vywdt * betaljk * betaljm
+sdt * (-betaljk * alphayljm -alphayljk * betaljm)
+cdt * (betaljk * alphayljm +alphayljk * betaljm)
)
sumylr = (
-vywdt * betaljk * betarjm
+sdt * (-betaljk * alphayrjm -alphayljk * betarjm)
+cdt * (betaljk * alphayrjm +alphayljk * betarjm)
)
sumyrr = (
-vywdt * betarjk * betarjm
+sdt * (-betarjk * alphayrjm -alphayrjk * betarjm)
+cdt * (betarjk * alphayrjm +alphayrjk * betarjm)
)
#print('i,j,k,m', i, j, k, m)
d2pxdldl[k, m] += sumxll
d2pxdldr[k, m] += sumxlr
d2pxdrdr[k, m] += sumxrr
d2pydldl[k, m] += sumyll
d2pydldr[k, m] += sumylr
d2pydrdr[k, m] += sumyrr
self.d2pxdldl = d2pxdldl
self.d2pxdldr = d2pxdldr
self.d2pxdrdr = d2pxdrdr
self.d2pydldl = d2pydldl
self.d2pydldr = d2pydldr
self.d2pydrdr = d2pydrdr
return (d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr)
def loss(self,
target_pose=(0.0, 0.0, 0.0),
target_twist=(0.0, 0.0, 0.0),
target_lr = (0.0, 0.0),
Wmax=1.0e-4,
Wjerk=1.0e-4,
Wback=1.0e-4,
mmax=1.0,
details=False
):
self.target_pose = target_pose
self.target_twist = target_twist
self.target_lr = target_lr
self.Wmax = Wmax
self.Wjerk = Wjerk
self.Wback = Wback
self.mmax = mmax
return self.reloss(details=details)
def reloss(self, details=False):
target_pose = self.target_pose
target_twist = self.target_twist
target_lr = self.target_lr
Wmax = self.Wmax
Wjerk = self.Wjerk
Wback = self.Wback
mmax = self.mmax
# given pose calculations, determine the loss
vxj = self.vxj
vyj = self.vyj
omegaj = self.omegaj
pxj = self.pxj
pyj = self.pyj
thetaj = self.thetaj
lefts = self.als
rights = self.ars
# values requiring summing over i
sumMax = 0.1 * Wmax * (
np.power(lefts, 10.0).sum() +np.power(rights, 10.0).sum()
) / mmax ** 10
# backing term
sumBack = 0.1 * Wback * np.power((lefts + rights).clip(max=0.0), 10).sum()
ldiff = lefts[1:] - lefts[:-1]
rdiff = rights[1:] - rights[:-1]
sumJerk = 0.5 * Wjerk * (np.square(ldiff).sum() + np.square(rdiff).sum())
# values based on final targets
vals = np.asarray([
pxj[-1]
, pyj[-1]
, thetaj[-1]
, vxj[-1]
, vyj[-1]
, omegaj[-1]
, lefts[-1]
, rights[-1]
])
targets = np.concatenate([target_pose, target_twist, target_lr])
#targets = np.concatenate([target_pose, target_twist[:1], target_lr])
diffs = vals - targets
# normalize theta difference from -pi to pi
diffs[2] = (diffs[2] + math.pi) % (2 * math.pi) - math.pi
sumTargets = 0.5 * np.square(diffs).sum()
loss = sumMax + sumJerk + sumTargets + sumBack
if details:
print('target losses: ' + estr(0.5 * np.square(vals - targets)))
print(estr({'loss': loss, 'sumMax': sumMax, 'sumJerk': sumJerk, 'sumTargets': sumTargets, 'sumBack': sumBack}))
print(fstr({'vals': vals}, fmat='15.12g'))
print(fstr({'targets': targets}))
print(fstr({'diffs': diffs}))
self.lossValue = loss
return loss
def jacobian(self):
# the 1st derivative of the loss function
vxj = self.vxj
vyj = self.vyj
omegaj = self.omegaj
pxj = self.pxj
pyj = self.pyj
thetaj = self.thetaj
(pxt, pyt, thetat) = self.target_pose
(vxt, vyt, omegat) = self.target_twist
(leftt, rightt) = self.target_lr
dpxdl = self.dpxdl
dpydl = self.dpydl
dpxdr = self.dpxdr
dpydr = self.dpydr
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
alphaxj = self.alphaxj
alphayj = self.alphayj
alphaoj = self.alphaoj
betaj = self.betaj
Wmax = self.Wmax
Wjerk = self.Wjerk
Wback = self.Wback
mmax = self.mmax
lefts = self.als
rights = self.ars
leftsp9 = np.power(lefts / mmax, 9)
rightsp9 = np.power(rights / mmax, 9)
lprsp9 = np.power((lefts + rights).clip(max=0.0), 9)
n = len(lefts)
dlefts = np.zeros([n])
drights = np.zeros([n])
difft = (thetaj[-1] - thetat + math.pi) % (2 * math.pi) - math.pi
for k in range(1, n):
dlefts[k] = (
+(vxj[-1] - vxt) * bhxl * alphaxj[n-1-k]
+(vyj[-1] - vyt) * bhyl * alphayj[n-1-k]
+(omegaj[-1] - omegat) * bhol * alphaoj[n-1-k]
+(difft) * bhol * betaj[n-1-k]
+(pxj[-1] - pxt) * dpxdl[-1, k]
+(pyj[-1] - pyt) * dpydl[-1, k]
+Wmax * leftsp9[k] / mmax
+Wback * lprsp9[k]
+Wjerk * (2 * lefts[k] -lefts[k-1] -lefts[min(k+1, n-1)])
)
drights[k] = (
+(vxj[-1] - vxt) * bhxr * alphaxj[n-1-k]
+(vyj[-1] - vyt) * bhyr * alphayj[n-1-k]
+(omegaj[-1] - omegat) * bhor * alphaoj[n-1-k]
+(difft) * bhor * betaj[n-1-k]
+(pxj[-1] - pxt) * dpxdr[-1, k]
+(pyj[-1] - pyt) * dpydr[-1, k]
                +Wmax * rightsp9[k] / mmax
+Wback * lprsp9[k]
+Wjerk * (2 * rights[k] -rights[k-1] -rights[min(k+1, n-1)])
)
# TODO: check this
dlefts[-1] += (lefts[-1] - leftt)
drights[-1] += (rights[-1] - rightt)
self.dlefts = dlefts
self.drights = drights
return (dlefts, drights)
def hessian(self):
# second derivative of the loss function
pxj = self.pxj
pyj = self.pyj
(pxt, pyt, _) = self.target_pose
dpxdl = self.dpxdl
dpydl = self.dpydl
dpxdr = self.dpxdr
dpydr = self.dpydr
(bhxl, bhxr, _) = self.bhes[0]
(bhyl, bhyr, _) = self.bhes[1]
(bhol, bhor, _) = self.bhes[2]
alphaxj = self.alphaxj
alphayj = self.alphayj
alphaoj = self.alphaoj
betaj = self.betaj
Wmax = self.Wmax
Wjerk = self.Wjerk
Wback = self.Wback
mmax = self.mmax
lefts = self.als
rights = self.ars
d2pxdldl = self.d2pxdldl
d2pxdldr = self.d2pxdldr
d2pxdrdr = self.d2pxdrdr
d2pydldl = self.d2pydldl
d2pydldr = self.d2pydldr
d2pydrdr = self.d2pydrdr
n = len(lefts) - 1
# We'll define this as 0 -> n-1 are lefts[1:], n -> 2n-1 are rights[1:]
hess = np.empty([2*n, 2*n])
# values that vary with each k, m value
deltapxn = pxj[-1] - pxt
deltapyn = pyj[-1] - pyt
for i in range(0, 2*n):
k = i % n + 1
kleft = (i < n)
if kleft:
dpxdu = dpxdl[n, k]
dpydu = dpydl[n, k]
dvxdu = alphaxj[n-k] * bhxl
dvydu = alphayj[n-k] * bhyl
domdu = alphaoj[n-k] * bhol
dthdu = betaj[n-k] * bhol
else:
dpxdu = dpxdr[n, k]
dpydu = dpydr[n, k]
dvxdu = alphaxj[n-k] * bhxr
dvydu = alphayj[n-k] * bhyr
domdu = alphaoj[n-k] * bhor
dthdu = betaj[n-k] * bhor
for j in range(0, 2*n):
m = j % n + 1
mleft = (j < n)
if mleft:
dpxds = dpxdl[n, m]
dpyds = dpydl[n, m]
dvxds = alphaxj[n-m] * bhxl
dvyds = alphayj[n-m] * bhyl
domds = alphaoj[n-m] * bhol
dthds = betaj[n-m] * bhol
if kleft:
d2px = d2pxdldl[k, m]
d2py = d2pydldl[k, m]
else:
# note d2pxdrdl[i,j] = d2pxdldr[j,i]
d2px = d2pxdldr[m, k]
d2py = d2pydldr[m, k]
else:
dpxds = dpxdr[n, m]
dpyds = dpydr[n, m]
dvxds = alphaxj[n-m] * bhxr
dvyds = alphayj[n-m] * bhyr
domds = alphaoj[n-m] * bhor
dthds = betaj[n-m] * bhor
if kleft:
d2px = d2pxdldr[k, m]
d2py = d2pydldr[k, m]
else:
d2px = d2pxdrdr[k, m]
d2py = d2pydrdr[k, m]
hess[i, j] = (
deltapxn * d2px + dpxdu * dpxds +
deltapyn * d2py + dpydu * dpyds +
dvxdu * dvxds + dvydu * dvyds + domdu * domds + dthdu * dthds
)
# values that require k == m
for i in range(0, 2*n):
k = i % n + 1
kleft = (i < n)
# max term
# TODO: I need factor of 9 here?
hess[i, i] += 9. * (Wmax / mmax**2) * (lefts[k]**8 if kleft else rights[k]**8)
# back term
if (lefts[k] + rights[k]) < 0.0:
hess[i, i] += 9. * Wback * (lefts[k] + rights[k])**8
# motor target value
if k == n:
hess[i, i] += 1.0
# jerk term
hess[i, i] += 2 *Wjerk
if k > 1:
hess[i, i-1] -= Wjerk
if k == n:
hess[i, i] -= Wjerk
else:
hess[i, i+1] -= Wjerk
self.hess = hess
return hess
def dloss_dleft(self, j, eps=1.e-3):
# numerical estimate of loss derivative at left[j]
base_als = self.als.copy()
lefts = base_als.copy()
lefts[j] += eps
        self.poses(lefts, self.ars)
        loss_plus = self.reloss()
lefts = base_als.copy()
lefts[j] -= eps
        self.poses(lefts, self.ars)
        loss_minus = self.reloss()
self.als = base_als
dloss = 0.5 * (loss_plus - loss_minus) / eps
return dloss
def d2loss_dl_dl(self, k, eps=0.0001):
# numerical estimate of second derivative of loss dl dl
base_als = self.als.copy()
n = len(self.als)
d2lossj = [0.0]
for j in range(1, n):
lefts = base_als.copy()
lefts[k] += eps
self.als = lefts
#dlossp = self.dloss_dleft(j, eps)
            self.poses(lefts, self.ars)
            self.gradients()
            self.jacobian()
dlossp = self.dlefts[j]
pxp = self.pxj[-1]
lefts = base_als.copy()
lefts[k] -= eps
self.als = lefts
#dlossm = self.dloss_dleft(j, eps)
            self.poses(lefts, self.ars)
            self.gradients()
            self.jacobian()
dlossm = self.dlefts[j]
pxm = self.pxj[-1]
d2lossj.append(0.5 * (dlossp - dlossm) / eps)
#print(estr({'pxp': pxp, 'pxm': pxm, 'pxp - pxm': pxp - pxm}))
print(estr(({'dlossp': dlossp, 'dlossm': dlossm, 'dlossp-dlossm': dlossp-dlossm, 'wjerk': self.Wjerk})))
self.als = base_als
return d2lossj
def dloss_dright(self, j, eps=0.0001):
# numerical estimate of loss derivative at right[j]
base_ars = self.ars.copy()
rights = base_ars.copy()
rights[j] += eps
        self.poses(self.als, rights)
        loss_plus = self.reloss()
rights = base_ars.copy()
rights[j] -= eps
        self.poses(self.als, rights)
        loss_minus = self.reloss()
self.ars = base_ars
dloss = 0.5 * (loss_plus - loss_minus) / eps
return dloss
if __name__ == '__main__':
from bdbd_common.pathPlan2 import PathPlan
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12,4))
dt = 0.05
lr_model = default_lr_model()
#lr_model = ((1.0, 1.0, 10.0), (-1.0, 1.0, 10.0), (-1.0, 10.0, 10.0))
start_pose = [0.0, 0.0, 0.0]
start_twist = [0.0, 0.0, 0.0]
target_pose = [0.2, .1, D_TO_R * 180]
target_twist = [0.0, 0.0, 0.0]
approach_rho = 0.05
min_rho = 0.02
cruise_v = 0.25
lr_start = (0.0, 0.0)
gauss_iters = 0
nr_iters = 20
Wmax = dt * 1.e-3
#Wmax = 0.0
Wjerk = dt * 1.e-3
Wback = 1.0
#Wback = 0.0
NRstart = 1.0
NRfact = 2
maxSlew = 1.00
testNR = False
pp = PathPlan(approach_rho=approach_rho, min_rho=min_rho)
pathPlan = pp.start2(start_pose, target_pose)
print('path_plan:')
for segment in pathPlan:
print(fstr(segment, fmat='10.7f'))
# estimate left, right to achieve the path
speedPlan = pp.speedPlan(start_twist[0], cruise_v, target_twist[0], u=0.10)
print('speed_plan:')
for segment in speedPlan:
print(fstr(segment, fmat='10.7f'))
vxr0 = start_twist[0] * math.cos(start_pose[2]) + start_twist[1] * math.sin(start_pose[2])
vyr0 = -start_twist[0] * math.sin(start_pose[2]) + start_twist[1] * math.cos(start_pose[2])
last_vx = vxr0
last_omega = start_twist[2]
vxres = [vxr0]
vyres = [vyr0]
omegas = [start_twist[2]]
vvs = [pp.v(0.0)]
vvs[0]['left'] = lr_start[0]
vvs[0]['right'] = lr_start[1]
lefts = [lr_start[0]]
rights = [lr_start[1]]
tt = 0.0
tees = [tt]
while True:
tt += dt
vv = pp.v(tt)
vvs.append(vv)
        # vv gives vhat, which is in the wheel frame. We need to convert to the robot frame.
vxres.append(vv['v'])
vyres.append(vv['omega'] * pp.dwheel)
omegas.append(vv['omega'])
(left, right, last_vx, last_omega) = lr_est(vv['v'], vv['omega'], last_vx, last_omega, dt)
lefts.append(left)
rights.append(right)
tees.append(tt)
vv['left'] = left
vv['right'] = right
if vv['fraction'] > 0.9999:
break
for seg in vvs:
print(estr(seg))
# send to C++ node for processing
rospy.init_node('NewRaph')
lrPub = rospy.Publisher('rawLR', LeftRights, queue_size=10)
lrMsg = LeftRights()
lrMsg.dt = dt
lrMsg.lefts = lefts
lrMsg.rights = rights
start_lefts = lefts.copy()
start_rights = rights.copy()
while not rospy.is_shutdown():
lefts = start_lefts.copy()
rights = start_rights.copy()
lrPub.publish(lrMsg)
print('\n***** publishing rawLR *****')
n = len(lefts)
nr = NewRaph(n, dt
,lr_model=lr_model
,start_pose=start_pose
,start_twist=start_twist
)
eps = 1.0
nhess = len(lefts) - 1
axis3 = None
gauss_count = 0
nr_count = 0
while True:
if rospy.is_shutdown():
break
base_lefts = lefts.copy()
base_rights = rights.copy()
rospy.sleep(0.01)
(pxj, pyj, thetaj, vxj, vyj, omegaj) = nr.poses(lefts, rights)
loss = nr.loss(mmax=1.0, target_pose=target_pose, Wmax=Wmax, Wjerk=Wjerk, Wback=Wback, details=True)
print('loss: ' + estr(loss))
(dpxdl, dpxdr, dpydl, dpydr) = nr.gradients()
(dlefts, drights) = nr.jacobian()
#print(gstr({'dlefts': dlefts, '\ndrights': drights}))
if gauss_count < gauss_iters:
#eps = 1.0
gauss_count += 1
slew = 0.0
for i in range(1, n):
if abs(dlefts[i]) > slew:
slew = abs(dlefts[i])
if abs(drights[i]) > slew:
slew = abs(drights[i])
# line search over deltax looking for best eps
best_eps = 0.0
best_loss = loss
worst_eps = maxSlew / slew
print('eps limited to ', worst_eps)
eps = min(eps, worst_eps)
for lcount in range(4):
last_eps = eps
for i in range(1, n):
lefts[i] = base_lefts[i] - eps*dlefts[i]
rights[i] = base_rights[i] - eps*drights[i]
nr.poses(lefts, rights)
loss = nr.reloss()
if loss > best_loss:
worst_eps = eps
else:
best_eps = eps
best_loss = loss
if eps * 2 < worst_eps:
eps *= 2
else:
eps = 0.5 * (best_eps + worst_eps)
print(estr({'(G)eps': last_eps, 'loss': loss, 'best_eps': best_eps, 'worst_eps': worst_eps, 'new_eps': eps}))
eps = best_eps
for i in range(1, n):
lefts[i] = base_lefts[i] - eps*dlefts[i]
rights[i] = base_rights[i] - eps*drights[i]
else:
if nr_count >= nr_iters:
break
nr_count += 1
nr.seconds()
hess = nr.hessian()
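                # Newton step: solve hess @ deltax = -gradient for the stacked [lefts[1:], rights[1:]] update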
b = np.concatenate([-nr.dlefts[1:], -nr.drights[1:]])
deltax = np.linalg.solve(nr.hess, b)
slew = np.amax(np.absolute(deltax))
# line search over deltax looking for best eps
best_eps = 0.0
best_loss = loss
worst_eps = maxSlew / slew
eps = min(eps, worst_eps)
for lcount in range(4):
last_eps = eps
lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]
rights[1:] = base_rights[1:] + eps * deltax[nhess:]
nr.poses(lefts, rights)
loss = nr.reloss()
if loss > best_loss:
worst_eps = eps
else:
best_eps = eps
best_loss = loss
if eps * 2 < worst_eps:
eps *= 2
else:
eps = 0.5 * (best_eps + worst_eps)
print(estr({'(N)eps': last_eps, 'loss': loss, 'best_eps': best_eps, 'worst_eps': worst_eps, 'new_eps': eps}))
eps = best_eps
#eps = min(best_eps, 1.0)
print('using eps: ', eps)
lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]
rights[1:] = base_rights[1:] + eps * deltax[nhess:]
fig.clf()
plt1 = fig.add_subplot(131)
#plt1.axis([0.0, tfPath.lrs[-1]['t'], -1.5, 1.5])
plt2 = fig.add_subplot(132)
plt3 = fig.add_subplot(133)
if axis3 is not None:
plt3.axis(axis3)
plt2.axis('equal')
plt1.plot(tees, lefts)
plt1.plot(tees, rights)
plt1.plot(tees, omegaj)
plt2.plot(pxj, pyj)
plt3.plot(tees, pxj)
plt3.plot(tees, pyj)
if gauss_count == 1:
plt.pause(1.0)
else:
plt.pause(1.0)
if axis3 is None:
axis3 = plt3.axis()
plt.waitforbuttonpress()
| 33.996471
| 129
| 0.462193
|
29f6e512205b95c1a71f5520b9b75a375be8a6d4
| 3,980
|
py
|
Python
|
metadata-ingestion/src/datahub/ingestion/source/aws/aws_common.py
|
magska/datahub
|
f2e2a4d5b7041400ab14dfd0109707518b9a02d3
|
[
"Apache-2.0"
] | 1
|
2021-10-21T07:34:13.000Z
|
2021-10-21T07:34:13.000Z
|
metadata-ingestion/src/datahub/ingestion/source/aws/aws_common.py
|
iamduyu/datahub
|
4c33124e8f5582749877e30ac2b0c0c1bfa06f42
|
[
"Apache-2.0"
] | 29
|
2021-10-05T12:12:02.000Z
|
2022-03-31T22:12:39.000Z
|
metadata-ingestion/src/datahub/ingestion/source/aws/aws_common.py
|
iamduyu/datahub
|
4c33124e8f5582749877e30ac2b0c0c1bfa06f42
|
[
"Apache-2.0"
] | 2
|
2021-09-15T09:40:34.000Z
|
2022-03-04T16:24:52.000Z
|
from functools import reduce
from typing import TYPE_CHECKING, List, Optional, Union
import boto3
from boto3.session import Session
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern
from datahub.emitter.mce_builder import DEFAULT_ENV
if TYPE_CHECKING:
from mypy_boto3_glue import GlueClient
from mypy_boto3_s3 import S3Client, S3ServiceResource
from mypy_boto3_sagemaker import SageMakerClient
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None
) -> dict:
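    # Returns the temporary credentials dict (AccessKeyId, SecretAccessKey, SessionToken, Expiration)
    # from STS; an earlier credential set may be passed in to make the assume-role call itself.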
credentials = credentials or {}
sts_client = boto3.client(
"sts",
region_name=aws_region,
aws_access_key_id=credentials.get("AccessKeyId"),
aws_secret_access_key=credentials.get("SecretAccessKey"),
aws_session_token=credentials.get("SessionToken"),
)
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName="DatahubIngestionSource"
)
return assumed_role_object["Credentials"]
class AwsSourceConfig(ConfigModel):
"""
Common AWS credentials config.
Currently used by:
- Glue source
- SageMaker source
"""
env: str = DEFAULT_ENV
database_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
aws_access_key_id: Optional[str] = None
aws_secret_access_key: Optional[str] = None
aws_session_token: Optional[str] = None
aws_role: Optional[Union[str, List[str]]] = None
aws_region: str
def get_session(self) -> Session:
if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
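                # Chain the roles: assume the first role with the default credentials,
                # then use each returned credential set to assume the next role in the list.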
credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
)
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
region_name=self.aws_region,
)
else:
return Session(region_name=self.aws_region)
def get_s3_client(self) -> "S3Client":
return self.get_session().client("s3")
def get_s3_resource(self) -> "S3ServiceResource":
return self.get_session().resource("s3")
def get_glue_client(self) -> "GlueClient":
return self.get_session().client("glue")
def get_sagemaker_client(self) -> "SageMakerClient":
return self.get_session().client("sagemaker")
def make_s3_urn(s3_uri: str, env: str, suffix: Optional[str] = None) -> str:
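    # e.g. make_s3_urn("s3://my-bucket/path/", "PROD") -> "urn:li:dataset:(urn:li:dataPlatform:s3,my-bucket/path,PROD)"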
if not s3_uri.startswith("s3://"):
raise ValueError("S3 URIs should begin with 's3://'")
# remove S3 prefix (s3://)
s3_name = s3_uri[5:]
if s3_name.endswith("/"):
s3_name = s3_name[:-1]
if suffix is not None:
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name}_{suffix},{env})"
return f"urn:li:dataset:(urn:li:dataPlatform:s3,{s3_name},{env})"
| 32.892562
| 82
| 0.646482
|
6363699ccb23cbc7837daf41f8d4f7bd75e56607
| 3,608
|
py
|
Python
|
poptimizer/data/adapters/gateways/conomy.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | null | null | null |
poptimizer/data/adapters/gateways/conomy.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | null | null | null |
poptimizer/data/adapters/gateways/conomy.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | 1
|
2021-12-02T13:32:44.000Z
|
2021-12-02T13:32:44.000Z
|
"""Загрузка данных с https://www.conomy.ru/."""
import asyncio
from typing import Final, Optional, cast
import pandas as pd
from pyppeteer import errors
from pyppeteer.page import Page
from poptimizer.data.adapters.gateways import gateways
from poptimizer.data.adapters.html import cell_parser, chromium, description, parser
from poptimizer.shared import adapters, col
# Parameters for locating the issuer page
SEARCH_URL: Final = "https://www.conomy.ru/search"
SEARCH_FIELD: Final = '//*[@id="issuer_search"]'
# Parameters for locating the dividend data
DIVIDENDS_MENU: Final = '//*[@id="page-wrapper"]/div/nav/ul/li[5]/a'
DIVIDENDS_TABLE: Final = '//*[@id="page-container"]/div[2]/div/div[1]'
# Index of the table on the html page and of the header row
TABLE_INDEX: Final = 1
# Timeout used to forcibly stop Chromium
CHROMIUM_TIMEOUT = 30
async def _load_ticker_page(page: Page, ticker: str) -> None:
"""Вводит в поле поиска тикер и переходит на страницу с информацией по эмитенту."""
await page.goto(SEARCH_URL)
await page.waitForXPath(SEARCH_FIELD)
element = (await page.xpath(SEARCH_FIELD))[0]
await element.type(ticker)
await element.press("Enter")
async def _load_dividends_table(page: Page) -> None:
"""Выбирает на странице эмитента меню дивиденды и дожидается загрузки таблиц с ними."""
await page.waitForXPath(DIVIDENDS_MENU)
element = (await page.xpath(DIVIDENDS_MENU))[0]
await element.click()
await page.waitForXPath(DIVIDENDS_TABLE)
async def _get_html(ticker: str, browser: chromium.Browser = chromium.BROWSER) -> str:
"""Возвращает html-код страницы с данными по дивидендам с сайта https://www.conomy.ru/."""
async with browser.get_new_page() as page:
await _load_ticker_page(page, ticker)
await _load_dividends_table(page)
return cast(str, await page.content())
def _get_col_desc(ticker: str) -> parser.Descriptions:
"""Формирует список с описанием необходимых столбцов."""
date = description.ColDesc(
num=5,
raw_name=("E", "Дата закрытия реестра акционеров", "Под выплату дивидендов"),
name=col.DATE,
parser_func=cell_parser.date_ru,
)
columns = [date]
if description.is_common(ticker):
common = description.ColDesc(
num=7,
raw_name=("G", "Размер дивидендов", "АОИ"),
name=ticker,
parser_func=cell_parser.div_ru,
)
columns.append(common)
return columns
preferred = description.ColDesc(
num=8,
raw_name=("H", "Размер дивидендов", "АПИ"),
name=ticker,
parser_func=cell_parser.div_ru,
)
columns.append(preferred)
return columns
class ConomyGateway(gateways.DivGateway):
"""Обновление для таблиц с дивидендами на https://www.conomy.ru/."""
_logger = adapters.AsyncLogger()
async def __call__(self, ticker: str) -> Optional[pd.DataFrame]:
"""Получение дивидендов для заданного тикера."""
self._logger(ticker)
try:
            # On some computers/operating systems Chromium stops responding to commands,
            # so the download is forcibly cancelled after a timeout
html = await asyncio.wait_for(_get_html(ticker), timeout=CHROMIUM_TIMEOUT)
except (errors.TimeoutError, asyncio.exceptions.TimeoutError):
return None
cols_desc = _get_col_desc(ticker)
df = parser.get_df_from_html(html, TABLE_INDEX, cols_desc)
df = df.dropna()
df = self._sort_and_agg(df)
df[col.CURRENCY] = col.RUR
return df
| 34.037736
| 102
| 0.690133
|
e8c2564f86014ba6dd3b007ca5ae6728ddb26abb
| 1,567
|
py
|
Python
|
wagtail_icon_picker/edit_handlers.py
|
xeroticikot/wagtail-icon-picker
|
7b76071ad5ff844e9f05dac02135af4b817d7480
|
[
"MIT"
] | 1
|
2021-11-07T15:52:58.000Z
|
2021-11-07T15:52:58.000Z
|
wagtail_icon_picker/edit_handlers.py
|
xeroticikot/wagtail-icon-picker
|
7b76071ad5ff844e9f05dac02135af4b817d7480
|
[
"MIT"
] | null | null | null |
wagtail_icon_picker/edit_handlers.py
|
xeroticikot/wagtail-icon-picker
|
7b76071ad5ff844e9f05dac02135af4b817d7480
|
[
"MIT"
] | null | null | null |
from wagtail.admin.edit_handlers import FieldPanel
from wagtail_icon_picker.widgets import BoxIconPickerWidget, BootstrapIconPickerWidget, IcofontPickerWidget, \
FontAwesomeIconPickerWidget, BoxIconsInputWidget, BootstrapIconsInputWidget, IcofontInputWidget, \
FontAwesomeIconsInputWidget
class BootstrapIconPickerPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: BootstrapIconPickerWidget(),
}
class BootstrapIconInputPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: BootstrapIconsInputWidget(),
}
class BoxiconsPickerPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: BoxIconPickerWidget(),
}
class BoxiconsInputPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: BoxIconsInputWidget(),
}
class FontAwesomeIconPickerPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: FontAwesomeIconPickerWidget(),
}
class FontAwesomeIconInputPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: FontAwesomeIconsInputWidget(),
}
class IcofontIconPickerPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: IcofontPickerWidget(),
}
class IcofontIconInputPanel(FieldPanel):
def widget_overrides(self):
return {
self.field_name: IcofontInputWidget(),
}
| 25.274194
| 110
| 0.689853
|
bedfb1f04188e0bb4e4c98ea21d872dabd15a257
| 193
|
py
|
Python
|
ProfissionalEnc/forms.py
|
robertoalcaras/ProjetoIntegrador
|
796fb5d78ed9ad7b842bc76259dfb5f71b25dc48
|
[
"MIT"
] | 1
|
2022-03-08T11:29:09.000Z
|
2022-03-08T11:29:09.000Z
|
ProfissionalEnc/forms.py
|
robertoalcaras/ProjetoIntegrador
|
796fb5d78ed9ad7b842bc76259dfb5f71b25dc48
|
[
"MIT"
] | null | null | null |
ProfissionalEnc/forms.py
|
robertoalcaras/ProjetoIntegrador
|
796fb5d78ed9ad7b842bc76259dfb5f71b25dc48
|
[
"MIT"
] | null | null | null |
from django import forms
from ProfissionalEnc.models import ProfissionalEnc
class ProfissionalEncForm(forms.ModelForm):
class Meta:
model = ProfissionalEnc
fields = '__all__'
| 27.571429
| 50
| 0.761658
|
50b8fa21b128f472c5dbbadf978f91f2183b96a2
| 1,193
|
py
|
Python
|
Modules/gr-tutorial/python/__init__.py
|
mfkiwl/Post-Shannon-SDR
|
19a40910f0fd3d78a65f1aedc17747f6bb770f62
|
[
"MIT"
] | 4
|
2021-08-17T08:39:10.000Z
|
2022-03-25T17:03:53.000Z
|
Modules/gr-tutorial/python/__init__.py
|
jracevedob/Post-Shannon-SDR
|
19a40910f0fd3d78a65f1aedc17747f6bb770f62
|
[
"MIT"
] | null | null | null |
Modules/gr-tutorial/python/__init__.py
|
jracevedob/Post-Shannon-SDR
|
19a40910f0fd3d78a65f1aedc17747f6bb770f62
|
[
"MIT"
] | null | null | null |
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio TUTORIAL module. Place your Python package
description here (python/__init__.py).
'''
from __future__ import unicode_literals
# import swig generated symbols into the tutorial namespace
try:
# this might fail if the module is python-only
from .tutorial_swig import *
except ImportError:
pass
# import any pure python here
#
| 33.138889
| 74
| 0.766136
|
574be00a5c4617cf51d49d77a306d51d689ee044
| 4,129
|
py
|
Python
|
bin/L8_TOA_refl.py
|
jmichellehu/rs_tools
|
7fbf8bed198dd4d7b7ee065fbc853a37ce9ac700
|
[
"MIT"
] | null | null | null |
bin/L8_TOA_refl.py
|
jmichellehu/rs_tools
|
7fbf8bed198dd4d7b7ee065fbc853a37ce9ac700
|
[
"MIT"
] | null | null | null |
bin/L8_TOA_refl.py
|
jmichellehu/rs_tools
|
7fbf8bed198dd4d7b7ee065fbc853a37ce9ac700
|
[
"MIT"
] | 1
|
2020-07-01T15:26:14.000Z
|
2020-07-01T15:26:14.000Z
|
#!/usr/bin/env python
# This script calculates TOA reflectance for Landsat 8 Level 1 imagery
# import libraries
# requires gdal and geoio from https://github.com/DigitalGlobe/geoio
# uses functions created by dshean's dgtools github repo
import math
import geoio
from gdalconst import *
import argparse
import re
# Have user define input data from MTL file and output filename
parser = argparse.ArgumentParser(description='GeoTiff Landsat 8 Multispectral Image to TOA Reflectance Script')
parser.add_argument('-in', '--input_file', help='GeoTiff multi band MS image file', required=True)
# parser.add_argument('-in_band', '--input_band', help='GeoTiff multi band', required=True)
# parser.add_argument('-M', '--input_Mp', help='GeoTiff multi band Reflectance Multiplication input', required=True)
# parser.add_argument('-A', '--input_Ap', help='GeoTiff multi band Reflectance Addition input', required=True)
# parser.add_argument('-sun', '--input_SunEl', help='GeoTiff multi band Sun Elevation input', required=True)
parser.add_argument('-in_MTL', '--input_MTL_textfile', help='Delivered with L8 imagery', required=True)
parser.add_argument('-out', '--output_file', help='Where TOA reflectance image is to be saved', required=True)
args = parser.parse_args()
in_filename = args.input_file
# Mp = float(args.input_Mp)
# Ap = float(args.input_Ap)
# sunelev = float(args.input_SunEl)
in_MTL_filename = args.input_MTL_textfile
out_filename = args.output_file
######## --------- Define functions --------- ########
# Check that values for list are equivalent. Sourced from https://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical
def check_equal(some_list):
# return boolean of equality for 2nd element to end and 1st element to penultimate
return some_list[1:] == some_list[:-1]
def get_val(some_list):
# extract value after " = " in list of strings
vals = [val.split(' = ')[1] for val in some_list]
return(vals)
### --- Extract Mp, Ap, and sunelev values from MTL file --- ###
mtl_list = []
with open(in_MTL_filename, 'r') as f:
for line in f:
# strip the trailing newline character
line=line.rstrip()
# and strip the leading whitespaces, newline, and tab characters
line=line.lstrip()
# append this to the list
mtl_list.append(line)
# Use regular expressions to find matches for the Mp, Ap, and SunEl values
Mp_pattern=re.compile(r"(REFLECTANCE_MULT).*")
Ap_pattern=re.compile(r"(REFLECTANCE_ADD).*")
Sun_pattern=re.compile(r"(SUN_).*")
# iterate through each line in the list and return matches
Mp_list = [m.group() for line in mtl_list for m in [Mp_pattern.search(line)] if m]
Ap_list = [m.group() for line in mtl_list for m in [Ap_pattern.search(line)] if m]
Sun_list = [m.group() for line in mtl_list for m in [Sun_pattern.search(line)] if m]
# extract corresponding value (i.e. the bit after " = ")
Mp_val = get_val(Mp_list)
Ap_val = get_val(Ap_list)
Sun_val = get_val(Sun_list)
# Check that each band has the same value for Mp and Ap, and save extracted values as floats in the Mp, Ap, and sunel variables to be used in L8_toa_refl calculations. Otherwise, flag it and tell the user to check the MTL file
if check_equal(Mp_val):
Mp=float(Mp_val[0])
else:
print("Mp values are not equal, examine MTL file")
print(Mp_list)
if check_equal(Ap_val):
Ap=float(Ap_val[0])
else:
print("Ap values are not equal, examine MTL file")
print(Ap_list)
if (float(Sun_val[1]) <= 90.0 and float(Sun_val[1]) >=0.0):
sunelev = float(Sun_val[1])
else:
print("Sun elevation value out of bounds, examine MTL file")
print(Sun_val)
print(Mp, Ap, sunelev)
######## --------- CONVERT TO TOA REFLECTANCE --------- ########
# Open the multiband landsat image
img=geoio.GeoImage(in_filename)
# Numpy arrays of tif
data=img.get_data()
# Calculate TOA reflectances - equations from https://landsat.usgs.gov/using-usgs-landsat-8-product
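# rho' = Mp * Qcal + Ap, then rho = rho' / cos(solar zenith), where solar zenith = 90 - sun elevation (degrees)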
newdata = Mp * data + Ap
solzenith = 90-sunelev
TOA_refl = newdata/math.cos(solzenith/360*2*math.pi)
img.write_img_like_this(out_filename, TOA_refl)
| 35.904348
| 227
| 0.719545
|
7769b00137f89929fa7f2b33b3be9b749285aaf1
| 1,637
|
py
|
Python
|
src/dependencys/comtypes/comtypes/logutil.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | 1
|
2021-11-24T07:32:20.000Z
|
2021-11-24T07:32:20.000Z
|
src/dependencys/comtypes/comtypes/logutil.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | null | null | null |
src/dependencys/comtypes/comtypes/logutil.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | null | null | null |
# logutil.py
import logging, ctypes
class NTDebugHandler(logging.Handler):
def emit(
self,
record,
writeA=ctypes.windll.kernel32.OutputDebugStringA,
writeW=ctypes.windll.kernel32.OutputDebugStringW,
):
        text = self.format(record)
        if isinstance(text, bytes):
            writeA(text + b"\n")
        else:
            writeW(text + "\n")
logging.NTDebugHandler = NTDebugHandler
def setup_logging(*pathnames):
import configparser
parser = configparser.ConfigParser()
parser.optionxform = str # use case sensitive option names!
parser.read(pathnames)
DEFAULTS = {
"handler": "StreamHandler()",
"format": "%(levelname)s:%(name)s:%(message)s",
"level": "WARNING",
}
def get(section, option):
try:
            return parser.get(section, option, raw=True)
except (configparser.NoOptionError, configparser.NoSectionError):
return DEFAULTS[option]
levelname = get("logging", "level")
format = get("logging", "format")
handlerclass = get("logging", "handler")
# convert level name to level value
level = getattr(logging, levelname)
# create the handler instance
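    # e.g. handlerclass is "StreamHandler()" or "NTDebugHandler()", evaluated in the logging namespace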
handler = eval(handlerclass, vars(logging))
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.root.setLevel(level)
try:
for name, value in parser.items("logging.levels", True):
value = getattr(logging, value)
logging.getLogger(name).setLevel(value)
except configparser.NoSectionError:
pass
| 26.836066
| 73
| 0.637141
|
8c1b56b603a6ffc9e850cc451633bb6b6ca1566c
| 5,514
|
py
|
Python
|
api_site/src/api_x/zyt/biz/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | 1
|
2019-10-14T11:51:49.000Z
|
2019-10-14T11:51:49.000Z
|
api_site/src/api_x/zyt/biz/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
api_site/src/api_x/zyt/biz/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
# coding=utf-8
from api_x.constant import TransactionType
from api_x.zyt.biz.payment import handle_payment_result
from .pay import handle_payment_notify
from .prepaid import handle_prepaid_notify
from .refund import handle_refund_notify
from .withdraw import handle_withdraw_notify
from .query_notify import register_query_notify_handle
def init_test_pay_notify_handles():
from api_x.zyt.evas.test_pay.constant import BizType, NotifyType
from api_x.zyt.evas.test_pay.notify import register_notify_handle, register_pay_to_bankcard_notify_handle
register_notify_handle(TransactionType.PAYMENT, BizType.PAY, NotifyType.Pay.SYNC, handle_payment_result)
# payment
register_notify_handle(TransactionType.PAYMENT, BizType.PAY, NotifyType.Pay.ASYNC, handle_payment_notify)
# prepaid
register_notify_handle(TransactionType.PREPAID, BizType.PAY, NotifyType.Pay.ASYNC, handle_prepaid_notify)
# refund
register_notify_handle(TransactionType.REFUND, BizType.REFUND, NotifyType.Refund.ASYNC, handle_refund_notify)
# pay_to_bankcard
register_pay_to_bankcard_notify_handle(TransactionType.WITHDRAW, handle_withdraw_notify)
def init_lianlian_pay_notify_handles():
from api_x.zyt.evas.lianlian_pay import NAME
from api_x.zyt.evas.lianlian_pay.constant import NotifyType
from api_x.zyt.evas.lianlian_pay.notify import register_pay_notify_handle, register_refund_notify_handle
from api_x.zyt.evas.lianlian_pay.notify import register_pay_to_bankcard_notify_handle
from api_x.zyt.evas.lianlian_pay import query_refund_notify
    # Both payment and prepaid use the same payment_result handler; the tx type is always PAYMENT.
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC, handle_payment_result)
# payment
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.ASYNC, handle_payment_notify)
# prepaid
register_pay_notify_handle(TransactionType.PREPAID, NotifyType.Pay.ASYNC, handle_prepaid_notify)
# refund
register_refund_notify_handle(TransactionType.REFUND, handle_refund_notify)
# pay_to_bankcard
register_pay_to_bankcard_notify_handle(TransactionType.WITHDRAW, handle_withdraw_notify)
# query_notify
# notify(source, sn, sn_created_on[, vas_name])
register_query_notify_handle(TransactionType.REFUND, NAME, query_refund_notify)
def init_ali_pay_notify_handles():
from api_x.zyt.evas.ali_pay import NAME
from api_x.zyt.evas.ali_pay.constant import NotifyType
from api_x.zyt.evas.ali_pay.notify import register_pay_notify_handle, register_refund_notify_handle
from api_x.zyt.evas.ali_pay import query_pay_notify, query_refund_notify
    # Both payment and prepaid use the same payment_result handler; the tx type is always PAYMENT.
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC, handle_payment_result)
# payment
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.ASYNC, handle_payment_notify)
# prepaid
register_pay_notify_handle(TransactionType.PREPAID, NotifyType.Pay.ASYNC, handle_prepaid_notify)
# refund
register_refund_notify_handle(TransactionType.REFUND, handle_refund_notify)
# query_notify
# notify(source, sn, [, vas_name, vas_sn])
register_query_notify_handle(TransactionType.PAYMENT, NAME, query_pay_notify)
# notify(source, sn, sn_created_on[, vas_name])
register_query_notify_handle(TransactionType.REFUND, NAME, query_refund_notify)
def init_weixin_pay_notify_handles():
from api_x.zyt.evas.weixin_pay import NAME
from api_x.zyt.evas.weixin_pay.constant import NotifyType
from api_x.zyt.evas.weixin_pay.notify import register_pay_notify_handle, register_refund_notify_handle
from api_x.zyt.evas.weixin_pay import query_pay_notify, query_refund_notify
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC, handle_payment_result)
# payment
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.ASYNC, handle_payment_notify)
# prepaid
register_pay_notify_handle(TransactionType.PREPAID, NotifyType.Pay.ASYNC, handle_prepaid_notify)
# refund
register_refund_notify_handle(TransactionType.REFUND, handle_refund_notify)
# query_notify
# notify(source, sn, [, vas_name, vas_sn])
register_query_notify_handle(TransactionType.PAYMENT, NAME, query_pay_notify)
# notify(source, sn, sn_created_on[, vas_name])
register_query_notify_handle(TransactionType.REFUND, NAME, query_refund_notify)
def init_zyt_pay_notify_handles():
from .pay import handle_paid_out
from .refund import handle_refund_in
from api_x.zyt.vas.constant import NotifyType
from api_x.zyt.vas.notify import register_pay_notify_handle, register_refund_notify_handle
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC, handle_payment_result)
# payment
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.ASYNC, handle_payment_notify)
register_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.PAID_OUT, handle_paid_out)
# refund
register_refund_notify_handle(TransactionType.REFUND, NotifyType.Refund.ASYNC, handle_refund_notify)
register_refund_notify_handle(TransactionType.REFUND, NotifyType.Refund.REFUND_IN, handle_refund_in)
def init_register_notify_handles():
init_test_pay_notify_handles()
init_ali_pay_notify_handles()
init_lianlian_pay_notify_handles()
init_weixin_pay_notify_handles()
init_zyt_pay_notify_handles()
| 44.112
| 113
| 0.820638
|
e0d11782bf5ef80d02828843ae33a77f7b5c4017
| 2,440
|
py
|
Python
|
modules/selfserve/files/selfserve/lib/selfserve/keys.py
|
mshuler/infrastructure-puppet
|
bb054d08e89f9bf4b804a7a453f02ae722519d0a
|
[
"Apache-2.0"
] | 1
|
2019-06-09T10:25:04.000Z
|
2019-06-09T10:25:04.000Z
|
modules/selfserve/files/selfserve/lib/selfserve/keys.py
|
mshuler/infrastructure-puppet
|
bb054d08e89f9bf4b804a7a453f02ae722519d0a
|
[
"Apache-2.0"
] | 1
|
2020-05-08T07:07:43.000Z
|
2020-05-08T07:07:43.000Z
|
modules/selfserve/files/selfserve/lib/selfserve/keys.py
|
mshuler/infrastructure-puppet
|
bb054d08e89f9bf4b804a7a453f02ae722519d0a
|
[
"Apache-2.0"
] | 1
|
2019-12-31T07:28:19.000Z
|
2019-12-31T07:28:19.000Z
|
#!/usr/bin/python
#
# Library logic for selfserve ss2: interfacing with PGP.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import gnupg
import logging
import tempfile
import time
import urllib2
from ss2config import *
import selfserve.exceptions
logger = logging.getLogger("%s.lib.keys" % LOGGER_NAME)
HTTP_NOT_FOUND = 404
def fetch_key(availid):
try:
return urllib2.urlopen(KEY_FOR_AVAILID_URL % availid).read()
except urllib2.HTTPError, he:
if he.getcode() == HTTP_NOT_FOUND:
return None
else:
raise
# TODO: this may need extension
def _fingerprint_for_gpg(fingerprint):
# Note: this works even if a slash is not present
slash = fingerprint.find('/')
return fingerprint[slash+1:].replace(' ', '')
def maybe_encrypt(plaintext, fingerprints, keys):
"""If possible, encrypt PLAINTEXT to the subset of the given KEYS that
are also present in FINGERPRINTS. Return the new text and a boolean
indicating whether encryption was done."""
# Can we encrypt?
if keys is None or fingerprints is None:
return (plaintext, False)
expiry = time.time() - 60 # one minute
homedir = tempfile.mkdtemp(suffix='.%d' % expiry, dir=STATE_DIR, prefix="selfserve-gnupghome.")
pgp = gnupg.GPG(gnupghome=homedir)
pgp.import_keys(keys)
fingerprints = map(_fingerprint_for_gpg, fingerprints)
ciphertext = pgp.encrypt(plaintext, fingerprints, always_trust=True)
if not ciphertext:
raise selfserve.exceptions.EncryptionError(ciphertext)
return (str(ciphertext), True)
| 34.857143
| 97
| 0.693852
|
fc0f6cfc3a19477d5ce38f677f26e9b61400ca72
| 2,162
|
py
|
Python
|
CIM16/IEC61970/Informative/InfWork/NonStandardItem.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Informative/InfWork/NonStandardItem.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Informative/InfWork/NonStandardItem.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 1
|
2021-04-02T18:04:49.000Z
|
2021-04-02T18:04:49.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61968.Common.Document import Document
class NonStandardItem(Document):
"""This document provides information for non-standard items like customer contributions (e.g., customer digs trench), vouchers (e.g., credit), and contractor bids.This document provides information for non-standard items like customer contributions (e.g., customer digs trench), vouchers (e.g., credit), and contractor bids.
"""
def __init__(self, code='', amount=0.0, *args, **kw_args):
"""Initialises a new 'NonStandardItem' instance.
@param code: The category of non-standard work.
@param amount: The projected cost for this item.
"""
#: The category of non-standard work.
self.code = code
#: The projected cost for this item.
self.amount = amount
super(NonStandardItem, self).__init__(*args, **kw_args)
_attrs = ["code", "amount"]
_attr_types = {"code": str, "amount": float}
_defaults = {"code": '', "amount": 0.0}
_enums = {}
_refs = []
_many_refs = []
| 45.041667
| 329
| 0.717391
|
416531d1cff99f2cccdc63cfee8d8b871aeea29a
| 817
|
py
|
Python
|
packages/qapi/qapi/middleware.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 12
|
2020-02-01T01:52:01.000Z
|
2021-04-28T15:06:43.000Z
|
packages/qapi/qapi/middleware.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 5
|
2020-02-06T08:07:58.000Z
|
2020-06-02T13:03:45.000Z
|
packages/qapi/qapi/middleware.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 11
|
2020-02-03T13:07:46.000Z
|
2020-11-29T01:44:06.000Z
|
"""
@author: 郭奕佳
@email: gyj@gzqichang.com
The frontend adds the request header:
X_HTTP_METHOD_OVERRIDE: ['GET', 'POST', 'PATCH', 'DELETE', 'PUT', 'HEAD', 'OPTION']
"""
from django.utils.deprecation import MiddlewareMixin
METHOD_OVERRIDE_HEADER = 'HTTP_X_HTTP_METHOD_OVERRIDE'
METHODS = ['GET', 'POST', 'PATCH', 'DELETE', 'PUT', 'HEAD', 'OPTION']
class MethodOverrideMiddleware(MiddlewareMixin):
"""
    Add the middleware in this order:
'django.middleware.csrf.CsrfViewMiddleware',
'qpi.middleware.MethodOverrideMiddleware',
"""
def process_view(self, request, callback, callback_args, callback_kwargs):
if METHOD_OVERRIDE_HEADER not in request.META:
return
if request.META[METHOD_OVERRIDE_HEADER] not in METHODS:
return
request.method = request.META[METHOD_OVERRIDE_HEADER]
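# Example (hypothetical request): sending "X-HTTP-Method-Override: DELETE" with a POST
# makes request.method == 'DELETE' by the time the view runs.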
| 30.259259
| 87
| 0.685435
|
cf78aa73523168423471b5e7d5b5cdc7163c5c0c
| 8,376
|
py
|
Python
|
exporters/committime/collector_bitbucket.py
|
caiomedeirospinto/pelorus
|
2cd21f11cb36b1d1cd34add6c7d23c13079d803c
|
[
"Apache-2.0"
] | 71
|
2019-11-27T19:36:42.000Z
|
2021-02-09T22:22:58.000Z
|
exporters/committime/collector_bitbucket.py
|
caiomedeirospinto/pelorus
|
2cd21f11cb36b1d1cd34add6c7d23c13079d803c
|
[
"Apache-2.0"
] | 176
|
2019-11-27T18:46:20.000Z
|
2021-02-15T14:39:21.000Z
|
exporters/committime/collector_bitbucket.py
|
caiomedeirospinto/pelorus
|
2cd21f11cb36b1d1cd34add6c7d23c13079d803c
|
[
"Apache-2.0"
] | 43
|
2019-12-11T20:43:58.000Z
|
2021-02-14T18:50:00.000Z
|
import json
import logging
import requests
from collector_base import AbstractCommitCollector
import pelorus
# import urllib3
# urllib3.disable_warnings()
class BitbucketCommitCollector(AbstractCommitCollector):
# globals for Bitbucket v1 API
V1_API_ROOT = "rest/api"
V1_API_TEST = "1.0/projects"
V1_API_PATTERN = "1.0/projects/{group}/repos/{project}/commits/{commit}"
# globals for Bitbucket v2 API
V2_API_ROOT = "api"
V2_API_TEST = "2.0/repositories"
V2_API_PATTERN = "2.0/repositories/{group}/{project}/commit/{commit}"
# Default http headers needed for API calls
DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
def __init__(self, kube_client, username, token, namespaces, apps):
super().__init__(
kube_client,
username,
token,
namespaces,
apps,
"BitBucket",
"%Y-%m-%dT%H:%M:%S%z",
)
self.__server_dict = {}
self.__session = requests.Session()
# base class impl
def get_commit_time(self, metric):
"""Method called to collect data and send to Prometheus"""
git_server = metric.git_server
# do a simple check for hosted Git services.
if "github" in git_server or "gitlab" in git_server:
logging.warn("Skipping non BitBucket server, found %s" % (git_server))
return None
# Set the session auth
self.__session.auth = (self._username, self._token)
# Get or figure out the BB API version
# check the cache
api_version = self.get_api_version(git_server)
if api_version is None:
return metric
# get the project, group, and commit sha properties from the existing metric
project_name = metric.repo_project
sha = metric.commit_hash
group = metric.repo_group
try:
# set API variables depending on the version
if api_version == "1.0":
# start with the V1 globals
api_root = self.V1_API_ROOT
api_pattern = self.V1_API_PATTERN
# Due to the BB V1 git pattern differences, need remove '/scm' and parse again.
old_url = metric.repo_url
# Parse out the V1 /scm, for whatever reason why it is present.
new_url = old_url.replace("/scm", "")
# set the new url, so the parsing will happen
metric.repo_url = new_url
# set the new project name
project_name = metric.repo_project
# set the new group
group = metric.repo_group
# set the URL back to the original
metric.repo_url = old_url
elif api_version == "2.0":
# Just set the V2 globals
api_root = self.V2_API_ROOT
api_pattern = self.V2_API_PATTERN
# Create the API server from the Git server and API Root
api_server = pelorus.url_joiner(git_server, api_root)
# Finally, make the API call
api_response = self.get_commit_information(
api_pattern, api_server, group, project_name, sha, metric
)
# Check for a valid response and continue if none is found
if (
api_response is None
or api_response.status_code != 200
or api_response.text is None
):
logging.warning(
"Unable to retrieve commit time for build: %s, hash: %s, url: %s. Got http code: %s"
% (
metric.build_name,
metric.commit_hash,
metric.repo_fqdn,
str(api_response.status_code),
)
)
return metric
logging.debug("API call returned: %s" % (api_response.text))
api_j = json.loads(api_response.text)
if api_version == "2.0":
# API V2 only has the commit time, which needs to be converted.
# get the commit date/time
commit_time = api_j["date"]
logging.debug(
"API v2 returned sha: %s, commit date: %s" % (sha, str(commit_time))
)
# set the commit time from the API
metric.commit_time = commit_time
# set the timestamp after conversion
metric.commit_timestamp = pelorus.convert_date_time_to_timestamp(
metric.commit_time, self._timedate_format
)
else:
# API V1 has the commit timestamp, which does not need to be converted
commit_timestamp = api_j["committerTimestamp"]
logging.debug(
"API v1 returned sha: %s, timestamp: %s"
% (sha, str(commit_timestamp))
)
# Convert timestamp from miliseconds to seconds
converted_timestamp = commit_timestamp / 1000
# set the timestamp in the metric
metric.commit_timestamp = converted_timestamp
# convert the time stamp to datetime and set in metric
metric.commit_time = pelorus.convert_timestamp_to_date_time_str(
converted_timestamp
)
except Exception:
logging.error(
"Failed processing commit time for build %s" % metric.build_name,
exc_info=True,
)
raise
return metric
def get_commit_information(
self, api_pattern, api_server, group, project_name, sha, metric
):
"""Makes an API call to get the commit information"""
# Finally, make the API call
api_response = None
try:
# build the API path using, group, project and commit sha
path = api_pattern.format(group=group, project=project_name, commit=sha)
# create the full URL
url = pelorus.url_joiner(api_server, path)
# send a GET
response = self.__session.request(
"GET", url=url, headers=self.DEFAULT_HEADERS
)
response.encoding = "utf-8"
api_response = response
except Exception:
logging.warning(
"Failed to find project: %s, repo: %s for build %s"
% (metric.repo_url, project_name, metric.build_name),
exc_info=True,
)
return api_response
def get_api_version(self, git_server):
"""Checks the map for a the Git server API version. If not found it makes an API call to determine."""
api_version = self.__server_dict.get(git_server)
if api_version is None:
# cache miss, figure out the api
# try version 2.0
if self.check_api_verison(
self.__session, git_server, self.V2_API_ROOT, self.V2_API_TEST
):
self.__server_dict[git_server] = "2.0"
api_version = self.__server_dict.get(git_server)
else: # try version 1.0
if self.check_api_verison(
self.__session, git_server, self.V1_API_ROOT, self.V1_API_TEST
):
self.__server_dict[git_server] = "1.0"
api_version = self.__server_dict.get(git_server)
return api_version
def check_api_verison(self, session, git_server, api_root, api_test):
"""Makes an API call to determine the API version"""
try:
api_server = pelorus.url_joiner(git_server, api_root)
url = pelorus.url_joiner(api_server, api_test)
response = session.request("GET", url=url, headers=self.DEFAULT_HEADERS)
status_code = response.status_code
if status_code == 200:
return True
else:
logging.warning(
"Unable to retrieve API version for URL: %s . Got http code: %s"
% (url, str(status_code))
)
return False
except Exception:
return False
| 39.885714
| 111
| 0.560053
|
bd273dad89357d8d9880522aa0d9a55249c2bb28
| 1,367
|
py
|
Python
|
machine/qemu/sources/u-boot/test/py/tests/test_spl.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | 1
|
2021-11-21T19:56:29.000Z
|
2021-11-21T19:56:29.000Z
|
machine/qemu/sources/u-boot/test/py/tests/test_spl.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
machine/qemu/sources/u-boot/test/py/tests/test_spl.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: GPL-2.0
# Copyright 2020 Google LLC
# Written by Simon Glass <sjg@chromium.org>
import os.path
import pytest
def test_spl(u_boot_console, ut_spl_subtest):
"""Execute a "ut" subtest.
The subtests are collected in function generate_ut_subtest() from linker
generated lists by applying a regular expression to the lines of file
spl/u-boot-spl.sym. The list entries are created using the C macro
UNIT_TEST().
Strict naming conventions have to be followed to match the regular
expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in
test suite foo that can be executed via command 'ut foo bar' and is
implemented in C function foo_test_bar().
Args:
u_boot_console (ConsoleBase): U-Boot console
ut_subtest (str): SPL test to be executed (e.g. 'dm platdata_phandle')
"""
try:
cons = u_boot_console
cons.restart_uboot_with_flags(['-u', '-k', ut_spl_subtest.split()[1]])
output = cons.get_spawn_output().replace('\r', '')
assert 'Failures: 0' in output
finally:
# Restart afterward in case a non-SPL test is run next. This should not
# happen since SPL tests are run in their own invocation of test.py, but
# the cost of doing this is not too great at present.
u_boot_console.restart_uboot()
| 39.057143
| 80
| 0.695684
|
bcde82d6f5a899f4d2b02d5920590cc7e770936a
| 4,789
|
py
|
Python
|
rally/plugins/openstack/context/quotas/quotas.py
|
boris-42/rally
|
8da58ac92d36de736138240cc825a0423e11ff83
|
[
"Apache-2.0"
] | 1
|
2018-01-01T00:43:41.000Z
|
2018-01-01T00:43:41.000Z
|
rally/plugins/openstack/context/quotas/quotas.py
|
boris-42/rally
|
8da58ac92d36de736138240cc825a0423e11ff83
|
[
"Apache-2.0"
] | null | null | null |
rally/plugins/openstack/context/quotas/quotas.py
|
boris-42/rally
|
8da58ac92d36de736138240cc825a0423e11ff83
|
[
"Apache-2.0"
] | 1
|
2020-06-05T10:06:37.000Z
|
2020-06-05T10:06:37.000Z
|
# Copyright 2014: Dassault Systemes
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import validation
from rally import consts
from rally import osclients
from rally.plugins.openstack.context.quotas import cinder_quotas
from rally.plugins.openstack.context.quotas import designate_quotas
from rally.plugins.openstack.context.quotas import manila_quotas
from rally.plugins.openstack.context.quotas import neutron_quotas
from rally.plugins.openstack.context.quotas import nova_quotas
from rally.task import context
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="quotas", platform="openstack", order=300)
class Quotas(context.Context):
"""Context class for updating benchmarks' tenants quotas."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
"cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
"manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA,
"designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,
"neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
}
}
def __init__(self, ctx):
super(Quotas, self).__init__(ctx)
self.clients = osclients.Clients(
self.context["admin"]["credential"],
api_info=self.context["config"].get("api_versions"))
self.manager = {
"nova": nova_quotas.NovaQuotas(self.clients),
"cinder": cinder_quotas.CinderQuotas(self.clients),
"manila": manila_quotas.ManilaQuotas(self.clients),
"designate": designate_quotas.DesignateQuotas(self.clients),
"neutron": neutron_quotas.NeutronQuotas(self.clients)
}
self.original_quotas = []
def _service_has_quotas(self, service):
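        # a service needs quota handling only if its section of the context config is non-empty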
return len(self.config.get(service, {})) > 0
@logging.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
def setup(self):
for tenant_id in self.context["tenants"]:
for service in self.manager:
if self._service_has_quotas(service):
# NOTE(andreykurilin): in case of existing users it is
# required to restore original quotas instead of reset
# to default ones.
if "existing_users" in self.context:
self.original_quotas.append(
(service, tenant_id,
self.manager[service].get(tenant_id)))
self.manager[service].update(tenant_id,
**self.config[service])
def _restore_quotas(self):
for service, tenant_id, quotas in self.original_quotas:
try:
self.manager[service].update(tenant_id, **quotas)
except Exception as e:
LOG.warning("Failed to restore quotas for tenant %(tenant_id)s"
" in service %(service)s \n reason: %(exc)s" %
{"tenant_id": tenant_id, "service": service,
"exc": e})
def _delete_quotas(self):
for service in self.manager:
if self._service_has_quotas(service):
for tenant_id in self.context["tenants"]:
try:
self.manager[service].delete(tenant_id)
except Exception as e:
LOG.warning("Failed to remove quotas for tenant "
"%(tenant_id)s in service %(service)s "
"\n reason: %(exc)s"
% {"tenant_id": tenant_id,
"service": service, "exc": e})
@logging.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
def cleanup(self):
if self.original_quotas:
# existing users
self._restore_quotas()
else:
self._delete_quotas()
| 42.380531
| 79
| 0.608687
|
b323ddad18ecc1bc9757998892dc53a086d3f627
| 16,005
|
py
|
Python
|
training/network_training/nnUNetTrainerCascadeFullRes.py
|
rylezhou/sunet-pytorch
|
46473f4ba6ce442335f318b45aee50a357af92bf
|
[
"Apache-2.0"
] | null | null | null |
training/network_training/nnUNetTrainerCascadeFullRes.py
|
rylezhou/sunet-pytorch
|
46473f4ba6ce442335f318b45aee50a357af92bf
|
[
"Apache-2.0"
] | null | null | null |
training/network_training/nnUNetTrainerCascadeFullRes.py
|
rylezhou/sunet-pytorch
|
46473f4ba6ce442335f318b45aee50a357af92bf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
from time import sleep
import matplotlib
from postprocessing.connected_components import determine_postprocessing
from training.data_augmentation.default_data_augmentation import get_default_augmentation
from training.dataloading.dataset_loading import DataLoader3D, unpack_dataset
from evaluation.evaluator import aggregate_scores
from training.network_training.nnUNetTrainer import nnUNetTrainer
from network_architecture.neural_network import SegmentationNetwork
from paths import network_training_output_dir
from inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from utilities.one_hot_encoding import to_one_hot
import shutil
matplotlib.use("agg")
class nnUNetTrainerCascadeFullRes(nnUNetTrainer):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainer", fp16=False):
super(nnUNetTrainerCascadeFullRes, self).__init__(plans_file, fold, output_folder, dataset_directory,
batch_dice, stage, unpack_data, deterministic, fp16)
self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, previous_trainer, fp16)
if self.output_folder is not None:
task = self.output_folder.split("/")[-3]
plans_identifier = self.output_folder.split("/")[-2].split("__")[-1]
folder_with_segs_prev_stage = join(network_training_output_dir, "3d_lowres",
task, previous_trainer + "__" + plans_identifier, "pred_next_stage")
if not isdir(folder_with_segs_prev_stage):
raise RuntimeError(
"Cannot run final stage of cascade. Run corresponding 3d_lowres first and predict the "
"segmentations for the next stage")
self.folder_with_segs_from_prev_stage = folder_with_segs_prev_stage
# Do not put segs_prev_stage into self.output_folder as we need to unpack them for performance and we
# don't want to do that in self.output_folder because that one is located on some network drive.
else:
self.folder_with_segs_from_prev_stage = None
def do_split(self):
super(nnUNetTrainerCascadeFullRes, self).do_split()
for k in self.dataset:
self.dataset[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
assert isfile(self.dataset[k]['seg_from_prev_stage_file']), \
"seg from prev stage missing: %s" % (self.dataset[k]['seg_from_prev_stage_file'])
for k in self.dataset_val:
self.dataset_val[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
for k in self.dataset_tr:
self.dataset_tr[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
def get_basic_generators(self):
self.load_dataset()
self.do_split()
if self.threeD:
dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
True, oversample_foreground_percent=self.oversample_foreground_percent)
dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, True,
oversample_foreground_percent=self.oversample_foreground_percent)
else:
raise NotImplementedError
return dl_tr, dl_val
def process_plans(self, plans):
super(nnUNetTrainerCascadeFullRes, self).process_plans(plans)
self.num_input_channels += (self.num_classes - 1) # for seg from prev stage
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params['move_last_seg_chanel_to_data'] = True
self.data_aug_params['cascade_do_cascade_augmentations'] = True
self.data_aug_params['cascade_random_binary_transform_p'] = 0.4
self.data_aug_params['cascade_random_binary_transform_p_per_label'] = 1
self.data_aug_params['cascade_random_binary_transform_size'] = (1, 8)
self.data_aug_params['cascade_remove_conn_comp_p'] = 0.2
self.data_aug_params['cascade_remove_conn_comp_max_size_percent_threshold'] = 0.15
self.data_aug_params['cascade_remove_conn_comp_fill_with_other_class_p'] = 0.0
# we have 2 channels now because the segmentation from the previous stage is stored in 'seg' as well until it
# is moved to 'data' at the end
self.data_aug_params['selected_seg_channels'] = [0, 1]
# needed for converting the segmentation from the previous stage to one hot
self.data_aug_params['all_segmentation_labels'] = list(range(1, self.num_classes))
def initialize(self, training=True, force_load_plans=False):
"""
For prediction of test cases just set training=False, this will prevent loading of training data and
training batchgenerator initialization
:param training:
:return:
"""
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.setup_DA_params()
if self.folder_with_preprocessed_data is not None:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())))
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())))
else:
pass
self.initialize_network()
assert isinstance(self.network, SegmentationNetwork)
self.was_initialized = True
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
current_mode = self.network.training
self.network.eval()
assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
if self.dataset_val is None:
self.load_dataset()
self.do_split()
if segmentation_export_kwargs is None:
if 'segmentation_export_params' in self.plans.keys():
force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
else:
force_separate_z = None
interpolation_order = 1
interpolation_order_z = 0
else:
force_separate_z = segmentation_export_kwargs['force_separate_z']
interpolation_order = segmentation_export_kwargs['interpolation_order']
interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']
output_folder = join(self.output_folder, validation_folder_name)
maybe_mkdir_p(output_folder)
if do_mirroring:
mirror_axes = self.data_aug_params['mirror_axes']
else:
mirror_axes = ()
pred_gt_tuples = []
export_pool = Pool(2)
results = []
transpose_backward = self.plans.get('transpose_backward')
for k in self.dataset_val.keys():
properties = load_pickle(self.dataset[k]['properties_file'])
data = np.load(self.dataset[k]['data_file'])['data']
# concat segmentation of previous step
seg_from_prev_stage = np.load(join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz"))['data'][None]
print(data.shape)
data[-1][data[-1] == -1] = 0
data_for_net = np.concatenate((data[:-1], to_one_hot(seg_from_prev_stage[0], range(1, self.num_classes))))
softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data_for_net,
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
all_in_gpu=all_in_gpu,
mixed_precision=self.fp16)[1]
if transpose_backward is not None:
transpose_backward = self.plans.get('transpose_backward')
softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in transpose_backward])
fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
if save_softmax:
softmax_fname = join(output_folder, fname + ".npz")
else:
                softmax_fname = None
            """There is a problem with python process communication that prevents us from communicating objects
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
            if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be safe
np.save(fname + ".npy", softmax_pred)
softmax_pred = fname + ".npy"
results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_pred, join(output_folder, fname + ".nii.gz"),
properties, interpolation_order, self.regions_class_order,
None, None,
softmax_fname, None, force_separate_z,
interpolation_order_z),
)
)
)
pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
join(self.gt_niftis_folder, fname + ".nii.gz")])
_ = [i.get() for i in results]
task = self.dataset_directory.split("/")[-1]
job_name = self.experiment_name
_ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
json_output_file=join(output_folder, "summary.json"), json_name=job_name,
json_author="Fabian", json_description="",
json_task=task)
if run_postprocessing_on_folds:
# in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
# except the largest connected component for each class. To see if this improves results, we do this for all
# classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
# have this applied during inference as well
self.print_to_log_file("determining postprocessing")
determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
            # after this the final predictions for the validation set can be found in validation_folder_name_base + "_postprocessed"
            # They are always in that folder, even if no postprocessing was applied!
        # determining postprocessing on a per-fold basis may be OK for this fold but what if another fold finds another
        # postprocessing to be better? In this case we need to consolidate. At the time the consolidation is going to be
# done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
# be used later
gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
maybe_mkdir_p(gt_nifti_folder)
for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
success = False
attempts = 0
while not success and attempts < 10:
try:
shutil.copy(f, gt_nifti_folder)
success = True
except OSError:
attempts += 1
sleep(1)
self.network.train(current_mode)
export_pool.close()
export_pool.join()
| 55.189655
| 132
| 0.609684
|
b9323db58ef679811e899af1e093fc6572ee9d45
| 31,031
|
py
|
Python
|
arteryfe/artery_network.py
|
akdiem/bloodflow
|
b960c77b1b5430f2ef787e962108d1502d3ecb46
|
[
"BSD-3-Clause"
] | 12
|
2018-11-01T17:10:13.000Z
|
2022-03-18T06:04:25.000Z
|
arteryfe/artery_network.py
|
akdiem/bloodflow
|
b960c77b1b5430f2ef787e962108d1502d3ecb46
|
[
"BSD-3-Clause"
] | 4
|
2018-12-12T13:47:14.000Z
|
2021-04-08T16:01:34.000Z
|
arteryfe/artery_network.py
|
syverda/bloodflow
|
b960c77b1b5430f2ef787e962108d1502d3ecb46
|
[
"BSD-3-Clause"
] | 11
|
2019-07-25T08:20:53.000Z
|
2021-12-30T00:03:04.000Z
|
import sys
import numpy as np
import numpy.linalg as npl
import configparser
from dolfin import *
from arteryfe.artery import Artery
from arteryfe.utils import *
comm = mpi_comm_world().tompi4py()
class ArteryNetwork(object):
"""
Builds an artery network from the given parameters. Arteries in the network
    are assigned indices from left to right and top to bottom.
Arguments
---------
order : int
Number of arterial levels
rc : float
Characteristic radius (length)
qc : float
Characteristic flow
Ru : list of float
Upstream radii of each artery in the network
Rd : list of float
Downstream radii of each artery in the network
L : list of float
Vessel lengths
k1 : float
First constant from the relation Eh/r0
k2 : float
Second constant from the relation Eh/r0
k3 : float
        Third constant from the relation Eh/r0
rho : float
Density of blood
Re : float
Reynolds' number
nu : float
Viscosity of blood
p0 : float
Diastolic pressure
R1 : float
First resistance from Windkessel model
R2 : float
Second resistance from Windkessel model
CT : float
Compliance from Windkessel model
"""
def __init__(self, param):
set_log_level(30)
order = param.param['order']
self.N = 2**order-1
if 'alpha' in param.param.keys():
Ru, Rd, L = self.build_geometry(param.param['order'],
param.param['Ru'], param.param['Rd'],
param.param['alpha'], param.param['L'],
param.param['R_term'])
param.param['Ru'] = Ru
param.param['Rd'] = Rd
param.param['L'] = L
self.nondim = nondimensionalise_parameters(param)
self.geo = param.geo
self.sol = param.solution
self.arteries = [0] * self.N
rc, qc, rho = self.nondim['rc'], self.nondim['qc'], self.nondim['rho']
Nt = self.geo['Nt']
self.T, self.q_ins = read_inlet(self.sol['inlet_flow_location'], Nt)
self.T = self.T*qc/rc**3
self.q_ins = self.q_ins/qc
self.dt = self.T/Nt
self.check_geometry()
for i in range(self.N):
root = (i==0)
leaf = False
if self.nondim['Ru'][i] == 0:
self.arteries[i] = None
else:
self.arteries[i] = Artery(i, self.T, self.nondim)
self.range_parent_arteries = list(range(self.N))
self.range_daughter_arteries = list(range(self.N))
self.range_leaf_arteries = list(range(self.N))
for i in range(self.N):
if i == 0:
# root is neither daughter nor leaf
self.range_daughter_arteries.remove(i)
self.range_leaf_arteries.remove(i)
self.arteries[i].root = True
elif self.arteries[i] is None:
# remove arteries that don't exist from lists
self.range_parent_arteries.remove(i)
self.range_daughter_arteries.remove(i)
self.range_leaf_arteries.remove(i)
else:
# Test if artery is parent
d1, d2 = self.daughter_arteries(i)
if d1 is None and d2 is None:
self.range_parent_arteries.remove(i)
self.arteries[i].leaf = True
# If artery is parent it's not a leaf
else:
self.range_leaf_arteries.remove(i)
self.arteries[i].leaf = False
# assign leaf boundary condition values
j = 0
for i in self.range_leaf_arteries:
if self.arteries[i] != None:
self.arteries[i].param['R1'] = self.nondim['R1'][j]
self.arteries[i].param['R2'] = self.nondim['R2'][j]
self.arteries[i].param['CT'] = self.nondim['CT'][j]
j += 1
self.define_geometry()
self.define_solution()
def daughter_arteries(self, i):
"""
Find and return the indices of the daughter arteries of artery i.
Arguments
---------
i : int
Index of the parent artery
Returns
-------
return : int
Daughter artery indices
"""
d1 = 2*i+1
d2 = 2*i+2
if d1 > self.N-1 or self.arteries[d1] is None:
d1 = None
if d2 > self.N-1 or self.arteries[d2] is None:
d2 = None
return d1, d2
def parent_artery(self, i):
"""
        Find and return the index of the parent artery of artery i.
Arguments
---------
i : int
Index of the daughter artery
Returns
-------
return : int
Parent artery index
"""
#if i <= 0 or i >= 2**self.order:
# raise Exception('Vessel index out of range')
return (i-1)//2 # d1 is odd, d2=d1+1 is even
def sister_artery(self, i):
"""
Find and return the index of the sister artery of artery i.
Arguments
---------
i : int
Index of the artery
Returns
-------
return : int
Sister artery index
"""
if i%2 == 0:
return i-1
else:
return i+1
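    # Worked example (added for clarity, not in the original code): with the root
    # at index 0, artery i = 3 has daughters 2*3+1 = 7 and 2*3+2 = 8, parent
    # (3-1)//2 = 1 and sister 4; daughter indices beyond N-1, or entries that are
    # None, are reported as missing by daughter_arteries().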
def build_geometry(self, order, Ru, Rd, alpha, L, R_term):
j = 0 # contains first artery ID on current level
k = 0
Ll = np.zeros(self.N)
Ll[0] = L * Ru[0]
for level in range(order):
for p in range(j, k):
d1 = 2*p+1
d2 = 2*p+2
Ru[d1] = alpha * Ru[p] * Ru[d1]
Ru[d2] = max(0, alpha * Ru[p]) * Ru[d2]
# check if the daughters are leaves
if 2**(level+1)-1 < len(Rd):
Rd[d1] = alpha * Rd[p] * Rd[d1]
Rd[d2] = max(0, alpha * Rd[p]) * Rd[d2]
else:
Rd[d1] = R_term
Rd[d2] = R_term
Ll[d1] = L * Ru[d1]
Ll[d2] = L * Ru[d2] - 0.01
j += int(2**(level-1))
k += int(2**level)
return Ru, Rd, Ll
def check_geometry(self):
order = self.nondim['order']
Ru = self.nondim['Ru']
Rd = self.nondim['Rd']
L = self.nondim['L']
R1 = self.nondim['R1']
R2 = self.nondim['R2']
CT = self.nondim['CT']
assert len(Ru) == self.N,\
"A network of order {} requires {} values for Ru, {} were provided".format(order, self.N, len(Ru))
assert len(Rd) == self.N,\
"A network of order {} requires {} values for Rd, {} were provided".format(order, self.N, len(Rd))
assert len(L) == self.N,\
"A network of order {} requires {} values for L, {} were provided".format(order, self.N, len(L))
if self.nondim['R1'] is list:
leaves = 2**(order-2)
assert len(R1) > leaves,\
"A network of order {} must have at least {} values for R1, {} were provided".format(order, leaves, len(R1))
assert len(R1) == len(R2),\
"R2 must have the same number of values as R1. {} != {}".format(len(R2), len(R1))
assert len(R1) == len(CT),\
"CT must have the same number of values as R1. {} != {}".format(len(CT), len(R2))
def define_geometry(self):
"""
Calls define_geometry() for each artery in the network.
        The geometry parameters are taken from self.geo.
"""
for artery in self.arteries:
if artery is not None:
artery.define_geometry(self.geo)
def define_solution(self):
"""
Calls define_solution() for each artery in the network.
        The solution parameters are taken from self.sol.
"""
theta = self.sol['theta']
q0 = self.q_ins[0]
self.arteries[0].define_solution(q0, theta)
for i in self.range_daughter_arteries:
if self.arteries[i] is not None:
p = self.parent_artery(i)
s = self.sister_artery(i)
q0 = self.arteries[i].A0(0)/(self.arteries[i].A0(0)\
+self.arteries[s].A0(0))\
* self.arteries[p].q0
self.arteries[i].define_solution(q0, theta)
self.x = np.ones([len(self.range_parent_arteries), 18])
def flux(self, a, U, x):
"""
Computes the flux term F(U) for a given artery a in the network.
Arguments
---------
a : Artery
Artery on which the flux term is computed
U : numpy.array
Value of solution
x : float
Point of evaluation
Returns
-------
return : numpy.array
Flux term F(U) for artery a at point x
"""
return np.array([U[1], U[1]**2 + a.f(x)*np.sqrt(a.A0(x)*U[0])])
def source(self, a, U, x):
"""
Computes the source term S(U) for a given artery a in the network.
Arguments
---------
a : Artery
Artery on which the flux term is computed
U : numpy.array
Value of solution
x : float
Point of evaluation
Returns
-------
return : numpy.array
Source term S(U) for artery a at point x
"""
S1 = 0
S2 = -2*np.sqrt(np.pi)/a.db/a.param['Re']*U[1]/np.sqrt(U[0])\
+ (2*np.sqrt(U[0])*(np.sqrt(np.pi)*a.f(x)\
+np.sqrt(a.A0(x))*a.dfdr(x))\
-U[0]*a.dfdr(x))*a.drdx(x)
return np.array([S1, S2])
def compute_U_half(self, a, x0, x1, U0, U1):
"""
Computes the solution for a given artery a in the network at a half
time step.
Arguments
---------
a : Artery
Artery on which the flux term is computed
x0 : float
Left point
x1 : float
Right point
U0 : numpy.array
Value of solution at x0
U1 : numpy.array
Value of solution at x1
x : float
Point of evaluation
Returns
-------
return : numpy.array
Solution for artery a at the middle point after half a time step
"""
# Value of terms at time t_n
F0, S0 = self.flux(a, U0, x0), self.source(a, U0, x0)
F1, S1 = self.flux(a, U1, x1), self.source(a, U1, x1)
return (U0+U1)/2 - a.dt/(x1-x0)*(F1-F0) + a.dt/4*(S0+S1)
def windkessel(self, a, k_max=100, tol=1.0e-12):
"""
Computes the area for artery a at the outlet
Arguments
---------
a : Artery
Artery on which the flux term is computed
k_max : int
            Maximum number of iterations in Picard's scheme
tol : float
            Tolerance for Picard's fixed-point iteration scheme
Returns
-------
return : float
Outlet boundary value of A at time step t_(n+1)
"""
aL = a.param['L']
a.adjust_dex(aL, a.Un(aL)[0], a.Un(aL)[1])
# Spatial step, scaled to satisfy the CFL condition
x2, x1, x0 = aL-2*a.dex, aL-a.dex, aL
x21, x10 = aL-1.5*a.dex, aL-0.5*a.dex
Um2, Um1, Um0 = a.Un(x2), a.Un(x1), a.Un(x0)
# Values at time step n
Fm2, Sm2 = self.flux(a, Um2, x2), self.source(a, Um2, x2)
Fm1, Sm1 = self.flux(a, Um1, x1), self.source(a, Um1, x1)
Fm0, Sm0 = self.flux(a, Um0, x0), self.source(a, Um0, x0)
# Values at time step n+1/2
U_half_21 = self.compute_U_half(a, x2, x1, Um2, Um1)
U_half_10 = self.compute_U_half(a, x1, x0, Um1, Um0)
F_half_21 = self.flux(a, U_half_21, x21)
S_half_21 = self.source(a, U_half_21, x21)
F_half_10 = self.flux(a, U_half_10, x10)
S_half_10 = self.source(a, U_half_10, x10)
# Value at time step n+1
qm1 = Um1[1]\
- a.dt/a.dex*(F_half_10[1]-F_half_21[1])\
+ a.dt/2*(S_half_10[1]+S_half_21[1])
# Fixed point iteration
pn = a.compute_outlet_pressure(Um0[0])
p = pn
R1 = a.param['R1']*0.3
R2 = a.param['R2']*0.01
CT = a.param['CT']
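        # Note added for clarity (assuming the usual three-element Windkessel model
        # p = R1*q + p_c with CT*dp_c/dt = q - p_c/R2): eliminating p_c and using a
        # forward difference in time gives
        #   q^(n+1) = q^n + (p^(n+1) - p^n)/R1 + dt*p^n/(R1*R2*CT)
        #             - dt*(R1 + R2)*q^n/(R1*R2*CT)
        # which is the fixed-point update used for qm0 in the loop below.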
for k in range(k_max):
p_old = p
qm0 = Um0[1]\
+ (p-pn)/R1\
+ self.dt/R1/R2/CT*pn\
- self.dt*(R1+R2)/R1/R2/CT*Um0[1]
Am0 = Um0[0] - self.dt/a.dex*(qm0-qm1)
p = a.compute_outlet_pressure(Am0)
if abs(p-p_old) < tol:
break
return Am0
    def structured_tree(self, a):
        # Placeholder for a structured-tree outflow boundary condition; not yet
        # implemented (p_term and impedance_weight are undefined in this module).
        N = int(1/self.dt)+1
pn = p_term
for k in range(N):
# pn += impedance_weight()*
pass
pass
def initial_x(self, p, d1, d2):
"""
Computes an initial guess for x at a bifurcation point. Set same value
at time t_(n+1) and t_(n+1/2) as time t_n. At point M+-1/2, set same
value as in point M.
Arguments
---------
p : Artery
Parent artery in the bifurcation
d1 : Artery
First daughter artery in the bifurcation
d2 : Artery
Second daughter artery in the bifurcation
Returns
-------
return : numpy.array
Initial guess for the 18 variables at a bifurcation
"""
x = np.zeros(18)
x[:3] = p.q0*np.ones(3)
x[3:6] = d1.q0*np.ones(3)
x[6:9] = d2.q0*np.ones(3)
x[9:12] = p.A0(p.param['L'])*np.ones(3)
x[12:15] = d1.A0(0)*np.ones(3)
x[15:] = d2.A0(0)*np.ones(3)
return x
def define_x(self):
"""
Calls initial_x() for each bifurcation in the artery network
"""
for ip in self.range_parent_arteries:
i1, i2 = self.daughter_arteries(ip)
p, d1, d2 = self.arteries[ip], self.arteries[i1], self.arteries[i2]
# Map artery index to range_parent_arteries index
i = self.range_parent_arteries.index(ip)
self.x[i] = self.initial_x(p, d1, d2)
def problem_function(self, p, d1, d2, x):
"""
Computes the solution f(x) = y of the system of equations at a
bifurcation.
Arguments
---------
p : Artery
Parent artery in the bifurcation
d1 : Artery
First daughter artery in the bifurcation
d2 : Artery
Second daughter artery in the bifurcation
x : numpy.array
Current solution
Returns
-------
return : numpy.array
Function value f(x)
"""
# Abbreviations
pL = p.param['L']
A0p, A01, A02 = p.A0(pL), d1.A0(0), d2.A0(0)
fp, f1, f2 = p.f(pL), d1.f(0), d2.f(0)
# Ghost half terms
Fp = self.flux(p, np.array([x[11], x[2]]), pL+p.dex/2)
F1 = self.flux(d1, np.array([x[14], x[5]]), -d1.dex/2)
F2 = self.flux(d2, np.array([x[17], x[8]]), -d2.dex/2)
Sp = self.source(p, np.array([x[11], x[2]]), pL+p.dex/2)
S1 = self.source(d1, np.array([x[14], x[5]]), -d1.dex/2)
S2 = self.source(d2, np.array([x[17], x[8]]), -d2.dex/2)
# Compute half-time-step-values in M-1/2 for p and 1/2 for d1 and d2
Um1p, Um0p = p.Un(pL-p.dex), p.Un(pL)
U0d1, U1d1 = d1.Un(0), d1.Un(d1.dex)
U0d2, U1d2 = d2.Un(0), d2.Un(d2.dex)
U_half_p = self.compute_U_half(p, pL-p.dex, pL, Um1p, Um0p)
U_half_1 = self.compute_U_half(d1, 0, d1.dex, U0d1, U1d1)
U_half_2 = self.compute_U_half(d2, 0, d2.dex, U0d2, U1d2)
F_half_p = self.flux(p, U_half_p, pL-p.dex/2)
S_half_p = self.source(p, U_half_p, pL-p.dex/2)
F_half_1 = self.flux(d1, U_half_1, d1.dex/2)
S_half_1 = self.source(d1, U_half_1, d1.dex/2)
F_half_2 = self.flux(d2, U_half_2, d2.dex/2)
S_half_2 = self.source(d2, U_half_2, d2.dex/2)
# Function return array
y = np.zeros(18)
# Entries from equation (20)
y[0] = 2*x[1] - U_half_p[1] - x[2]
y[1] = 2*x[4] - x[5] - U_half_1[1]
y[2] = 2*x[7] - x[8] - U_half_2[1]
# Entries from equation (21)
y[3] = 2*x[10] - U_half_p[0] - x[11]
y[4] = 2*x[13] - x[14] - U_half_1[0]
y[5] = 2*x[16] - x[17] - U_half_2[0]
# Entries from equation (22)
y[6] = x[0] - x[3] - x[6]
y[7] = x[1] - x[4] - x[7]
# Entries from equation (23)
y[8] = fp*(1-np.sqrt(A0p/x[10])) - f1*(1-np.sqrt(A01/x[13]))
y[9] = fp*(1-np.sqrt(A0p/x[10])) - f2*(1-np.sqrt(A02/x[16]))
y[10] = fp*(1-np.sqrt(A0p/x[9])) - f1*(1-np.sqrt(A01/x[12]))
y[11] = fp*(1-np.sqrt(A0p/x[9])) - f2*(1-np.sqrt(A02/x[15]))
# Entries from equation (26)
y[12] = x[0] - Um0p[1] + p.dt/p.dex*(Fp[1] - F_half_p[1])\
- p.dt/2*(Sp[1] + S_half_p[1])
y[13] = x[3] - U0d1[1] + d1.dt/d1.dex*(F_half_1[1] - F1[1])\
- d1.dt/2*(S_half_1[1] + S1[1])
y[14] = x[6] - U0d2[1] + d2.dt/d2.dex*(F_half_2[1] - F2[1])\
- d2.dt/2*(S_half_2[1] + S2[1])
# Entries from equation (27)
y[15] = x[9] - Um0p[0] + p.dt/p.dex*(Fp[0] - F_half_p[0])
y[16] = x[12] - U0d1[0] + d1.dt/d1.dex*(F_half_1[0] - F1[0])
y[17] = x[15] - U0d2[0] + d2.dt/d2.dex*(F_half_2[0] - F2[0])
return y
def jacobian(self, p, d1, d2, x):
"""
Computes the analytical Jacobian of the system of equations at a
bifurcation.
Arguments
---------
p : Artery
Parent artery in the bifurcation
d1 : Artery
First daughter artery in the bifurcation
d2 : Artery
Second daughter artery in the bifurcation
x : numpy.array
Current solution
Returns
-------
return : numpy.array
Jacobian matrix
"""
# Abbreviations
pL = p.param['L']
A0p, A01, A02 = p.A0(pL), d1.A0(0), d2.A0(0)
A0hp, A0h1, A0h2 = p.A0(pL+p.dex), d1.A0(-d1.dex), d2.A0(-d2.dex)
fp, f1, f2 = p.f(pL), d1.f(0), d2.f(0)
fhp, fh1, fh2 = p.f(pL+p.dex), d1.f(-d1.dex), d2.f(-d2.dex)
dbp, db1, db2 = p.db, d1.db, d2.db
Rep, Re1, Re2 = p.param['Re'], d1.param['Re'], d2.param['Re']
dfdrp, dfdr1, dfdr2 = p.dfdr(pL+p.dex),\
d1.dfdr(-d1.dex), d2.dfdr(-d2.dex)
drdxp, drdx1, drdx2 = p.drdx(pL+p.dex),\
d1.drdx(-d1.dex), d2.drdx(-d2.dex)
rpi = np.sqrt(np.pi)
# Jacobian matrix
J = np.zeros([18, 18])
# Entries from equation (20)
J[0, 1] = 2
J[0, 2] = -1
J[1, 4] = 2
J[1, 5] = -1
J[2, 7] = 2
J[2, 8] = -1
# Entries from equation (21)
J[3, 10] = 2
J[3, 11] = -1
J[4, 13] = 2
J[4, 14] = -1
J[5, 16] = 2
J[5, 17] = -1
# Entries from equation (22)
J[6, 0] = 1
J[6, 3] = -1
J[6, 6] = -1
J[7, 1] = 1
J[7, 4] = -1
J[7, 7] = -1
# Entries from equation (23)
J[8, 10] = fp*np.sqrt(A0p)/2/x[10]**(3/2)
J[8, 13] = -f1*np.sqrt(A01)/2/x[13]**(3/2)
J[9, 10] = fp*np.sqrt(A0p)/2/x[10]**(3/2)
J[9, 16] = -f2*np.sqrt(A02)/2/x[16]**(3/2)
J[10, 9] = fp*np.sqrt(A0p)/2/x[9]**(3/2)
J[10, 12] = -f1*np.sqrt(A01)/2/x[12]**(3/2)
J[11, 9] = fp*np.sqrt(A0p)/2/x[9]**(3/2)
J[11, 15] = -f2*np.sqrt(A02)/2/x[15]**(3/2)
# Entries from equation (26)
J[12, 0] = 1
J[12, 2] = p.dt/p.dex*2*x[2]/x[11] + p.dt*rpi/dbp/Rep/np.sqrt(x[11])
J[12, 11] = p.dt/p.dex*(-(x[2]/x[11])**2 + fhp/2*np.sqrt(A0hp/x[11]))\
- p.dt/2*(rpi/dbp/Rep*x[2]/x[11]**(3/2)\
+ (1/np.sqrt(x[11])*(rpi*fhp+np.sqrt(A0hp)*dfdrp)\
- dfdrp)*drdxp)
J[13, 3] = 1
J[13, 5] = -d1.dt/d1.dex*2*x[5]/x[14] + d1.dt*rpi/db1/Re1/np.sqrt(x[14])
J[13, 14] = d1.dt/d1.dex*((x[5]/x[14])**2 - fh1/2*np.sqrt(A0h1/x[14]))\
- d1.dt/2*(rpi/db1/Re1*x[5]/x[14]**(3/2)\
+ (1/np.sqrt(x[14])*(rpi*fh1+np.sqrt(A0h1)*dfdr1)\
- dfdr1)*drdx1)
J[14, 6] = 1
J[14, 8] = -d2.dt/d2.dex*2*x[8]/x[17] + d2.dt*rpi/db2/Re2/np.sqrt(x[17])
J[14, 17] = d2.dt/d2.dex*((x[8]/x[17])**2 - fh2/2*np.sqrt(A0h2/x[17]))\
- d2.dt/2*(rpi/db2/Re2*x[8]/x[17]**(3/2)\
+ (1/np.sqrt(x[17])*(rpi*fh2+np.sqrt(A0h2)*dfdr2)\
- dfdr2)*drdx2)
# Entries from equation (27)
J[15, 2] = p.dt/p.dex
J[15, 9] = 1
J[16, 5] = - d1.dt/d1.dex
J[16, 12] = 1
        J[17, 8] = - d2.dt/d2.dex  # row 17 corresponds to daughter 2, so d2 steps are used here
J[17, 15] = 1
return J
def newton(self, p, d1, d2, x=np.ones(18), k_max=30, tol=1.e-10):
"""
Computes the solution of the system of equations at a bifurcation
using Newton's method.
Arguments
---------
p : Artery
Parent artery in the bifurcation
d1 : Artery
First daughter artery in the bifurcation
d2 : Artery
Second daughter artery in the bifurcation
x : numpy.array
Current solution
k_max: int
Maximum number of iterations
tol : float
Tolerance for the solution
Returns
-------
return : numpy.array
Solution to the system of equations
"""
for k in range(k_max):
J = self.jacobian(p, d1, d2, x)
func = self.problem_function(p, d1, d2, x)
if npl.norm(func) < tol:
break
try:
x -= npl.solve(J, func)
except npl.LinAlgError:
print('Singular')
eps = 1.e-6 # Perturbation value
J += eps*np.eye(18)
func[0] += eps
x -= npl.solve(J, func)
return x
def adjust_bifurcation_step(self, p, d1, d2, margin=0.05):
"""
Chooses spatial step at a bifurcation to respect the CFL condition
for all three arteries
Arguments
---------
p : Artery
Parent artery in the bifurcation
d1 : Artery
First daughter artery in the bifurcation
d2 : Artery
Second daughter artery in the bifurcation
margin : float
Margin of CFL number
"""
# p_q = p.Un.vector().gather_on_zero()
pL = p.param['L']
Mp = p.CFL_term(pL, p.Un(pL)[0], p.Un(pL)[1])
M1 = d1.CFL_term(0, d1.Un(0)[0], d1.Un(0)[1])
M2 = d2.CFL_term(0, d2.Un(0)[0], d2.Un(0)[1])
# from IPython import embed; embed()
# dex is chosen to respect all three CFL-conditions
p.dex = d1.dex = d2.dex = (1+margin)*self.dt/min([Mp, M1, M2])
def set_inner_bc(self, ip, i1, i2):
"""
Calls newton() for each bifurcation to calculate the boundary values
for each artery at the bifurcation.
Arguments
---------
ip : int
Parent artery index in the bifurcation
i1 : int
First daughter artery index in the bifurcation
i2 : int
Second daughter artery index in the bifurcation
"""
p, d1, d2 = self.arteries[ip], self.arteries[i1], self.arteries[i2]
self.adjust_bifurcation_step(p, d1, d2)
# Map artery index to range_parent_arteries index
i = self.range_parent_arteries.index(ip)
self.x[i] = self.newton(p, d1, d2, self.x[i])
p.U_out = [self.x[i, 9], self.x[i, 0]]
d1.U_in = [self.x[i, 12], self.x[i, 3]]
d2.U_in = [self.x[i, 15], self.x[i, 6]]
def set_bcs(self, q_in):
"""
Updates all boundary values using the appropriate boundary conditions.
Arguments
---------
q_in : float
Inflow rate in the root vessel at time t_(n+1)
"""
# Update inlet boundary conditions
self.arteries[0].q_in = q_in
# Update bifurcation boundary conditions
for ip in self.range_parent_arteries:
i1, i2 = self.daughter_arteries(ip)
self.set_inner_bc(ip, i1, i2)
# Update outlet boundary conditions
for i in self.range_leaf_arteries:
self.arteries[i].A_out = self.windkessel(self.arteries[i])
def dump_metadata(self):
"""
        Save the mesh of every artery and write a configuration file (data.cfg)
        with the metadata needed to interpret the XDMF output files: network
        order, grid and time resolution, time span, scaling parameters and
        output locations.
"""
# Assemble strings
mesh_locations = ''
output_location = self.sol['output_location']
for artery in self.arteries:
if artery is not None:
i = artery.i
mesh_location = output_location + '/mesh_%i.h5' % (i)
# Save mesh
f = HDF5File(mpi_comm_world(), mesh_location, 'w')
f.write(artery.mesh, "/mesh")
f.close()
if i > 0:
mesh_locations += ','
mesh_locations += mesh_location
        L = ''
        for artery in self.arteries[:-1]:
            if artery is not None:
                L += str(artery.param['L'])+','
        if self.arteries[-1] is not None:
            L += str(self.arteries[-1].param['L'])
names = ''
locations = ''
names += 'flow'
locations += output_location + '/flow'
if self.sol['store_area']:
names += ',area'
locations += ',' + output_location + '/area'
if self.sol['store_pressure']:
names += ',pressure'
locations += ',' + output_location + '/pressure'
# Save metadata
N_cycles = self.geo['N_cycles']
N_cycles_store = self.sol['N_cycles_store']
config = configparser.RawConfigParser()
config.add_section('data')
config.set('data', 'order', str(self.nondim['order']))
config.set('data', 'Nx', str(self.geo['Nx']))
config.set('data', 'Nt',
str(self.sol['Nt_store']*N_cycles_store))
config.set('data', 'T0', str(self.T*(N_cycles-N_cycles_store)))
config.set('data', 'T', str(self.T*N_cycles))
config.set('data', 'L', str(self.nondim['L'])[1:-1])
config.set('data', 'rc', str(self.nondim['rc']))
config.set('data', 'qc', str(self.nondim['qc']))
config.set('data', 'rho', str(self.nondim['rho']))
config.set('data', 'mesh_locations', mesh_locations)
config.set('data', 'names', names)
config.set('data', 'locations', locations)
with open(output_location+'/data.cfg', 'w') as configfile:
config.write(configfile)
def solve(self):
"""
        Run the simulation: at every time step update the boundary conditions,
        solve the governing equations on each artery and store flow (and
        optionally area and pressure) for the requested cardiac cycles.
"""
self.define_x()
# Store parameters necessary for postprocessing
self.dump_metadata()
store_area = self.sol['store_area']
store_pressure = self.sol['store_pressure']
output_location = self.sol['output_location']
# Setup storage files
xdmffile_flow = [0] * self.N
if store_area:
xdmffile_area = [0] * self.N
if store_pressure:
xdmffile_pressure = [0] * self.N
for artery in self.arteries:
if artery is not None:
i = artery.i
xdmffile_flow[i] = XDMFFile('%s/flow/flow_%i.xdmf'\
% (output_location, i))
if store_area:
xdmffile_area[i] = XDMFFile('%s/area/area_%i.xdmf'\
% (output_location, i))
if store_pressure:
xdmffile_pressure[i] = XDMFFile('%s/pressure/pressure_%i.xdmf'\
% (output_location, i))
# Initialise time
t = 0
# Cardiac cycle iteration
N_cycles = self.geo['N_cycles']
Nt = self.geo['Nt']
N_cycles_store = self.sol['N_cycles_store']
Nt_store = self.sol['Nt_store']
for n_cycle in range(N_cycles):
# Time-stepping for one period
for n in range(Nt):
print_progress(n_cycle, n, n_cycle*Nt+n)
# Apply boundary conditions for time t_(n+1)
self.set_bcs(self.q_ins[(n+1) % (Nt)])
# Solve equation on each artery
for i, artery in enumerate(self.arteries):
if artery is not None:
# Store solution at time t_n
cycle_store = (n_cycle >= N_cycles-N_cycles_store)
if cycle_store and n % (Nt/Nt_store) == 0:
# Split solution for storing, with deepcopy
area, flow = artery.Un.split(True)
write_file(xdmffile_flow[i], flow, 'flow', t)
if store_area:
write_file(xdmffile_area[i], area, 'area', t)
if store_pressure:
artery.update_pressure()
write_file(xdmffile_pressure[i], artery.pn, 'pressure', t)
# Solve problem on artery for time t_(n+1)
artery.solve()
# Update current solution on artery
artery.update_solution()
t += self.dt
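# Hedged usage sketch (not part of the original module; the exact parameter
# handling lives elsewhere in arteryfe and the helper name below is assumed):
#
#   param = ParamParser('config/example.cfg')   # hypothetical config reader
#   an = ArteryNetwork(param)
#   an.solve()
#
# solve() writes per-artery flow (and optionally area and pressure) as XDMF
# files plus a data.cfg with the metadata needed for postprocessing.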
| 32.767687
| 124
| 0.494957
|
20e5311e8cc4b231933ba611d6e1e3896b5f9682
| 1,376
|
py
|
Python
|
tests/test_models.py
|
seung-lab/MaterializationEngine
|
b0dbda304db3f90c3e619edace6328dcf1b33f94
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
seung-lab/MaterializationEngine
|
b0dbda304db3f90c3e619edace6328dcf1b33f94
|
[
"MIT"
] | 14
|
2021-05-28T00:05:37.000Z
|
2022-03-22T18:37:27.000Z
|
tests/test_models.py
|
seung-lab/MaterializationEngine
|
b0dbda304db3f90c3e619edace6328dcf1b33f94
|
[
"MIT"
] | null | null | null |
from materializationengine.models import AnalysisTable, AnalysisVersion
def test_analysis_version(mat_metadata):
analysisversion = AnalysisVersion(
datastack=mat_metadata["datastack"],
time_stamp=mat_metadata["timestamp"],
version=mat_metadata["version"],
valid=False,
expires_on=mat_metadata["expires_timestamp"],
)
assert analysisversion.datastack == mat_metadata["datastack"]
assert analysisversion.time_stamp == mat_metadata["timestamp"]
assert analysisversion.version == mat_metadata["version"]
assert analysisversion.valid == False
assert analysisversion.expires_on == mat_metadata["expires_timestamp"]
def test_analysis_table(mat_metadata):
analysis_table = AnalysisTable(
aligned_volume=mat_metadata["aligned_volume"],
schema=mat_metadata["schema"],
table_name=mat_metadata["annotation_table_name"],
valid=True,
created=mat_metadata["timestamp"],
analysisversion_id=mat_metadata["version"],
)
assert analysis_table.aligned_volume == mat_metadata["aligned_volume"]
assert analysis_table.schema == mat_metadata["schema"]
assert analysis_table.table_name == mat_metadata["annotation_table_name"]
assert analysis_table.created == mat_metadata["timestamp"]
assert analysis_table.analysisversion_id == mat_metadata["version"]
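# Hedged sketch (not part of the original test module): both tests assume a
# `mat_metadata` pytest fixture defined elsewhere (e.g. in conftest.py). A
# minimal stand-in providing the keys used above could look like:
#
#   import datetime
#   import pytest
#
#   @pytest.fixture
#   def mat_metadata():
#       now = datetime.datetime.utcnow()
#       return {
#           "datastack": "test_datastack",
#           "aligned_volume": "test_volume",
#           "schema": "synapse",
#           "annotation_table_name": "test_table",
#           "timestamp": now,
#           "expires_timestamp": now + datetime.timedelta(days=5),
#           "version": 1,
#       }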
| 41.69697
| 77
| 0.742733
|
c78b6954506278840b89405ab14199195f3ac1f0
| 899
|
py
|
Python
|
.modules/.sqlmap/lib/utils/versioncheck.py
|
termux-one/EasY_HaCk
|
0a8d09ca4b126b027b6842e02fa0c29d8250e090
|
[
"Apache-2.0"
] | 1,103
|
2018-04-20T14:08:11.000Z
|
2022-03-29T06:22:43.000Z
|
.modules/.sqlmap/lib/utils/versioncheck.py
|
sshourya948/EasY_HaCk
|
0a8d09ca4b126b027b6842e02fa0c29d8250e090
|
[
"Apache-2.0"
] | 29
|
2019-04-03T14:52:38.000Z
|
2022-03-24T12:33:05.000Z
|
.modules/.sqlmap/lib/utils/versioncheck.py
|
sshourya948/EasY_HaCk
|
0a8d09ca4b126b027b6842e02fa0c29d8250e090
|
[
"Apache-2.0"
] | 161
|
2018-04-20T15:57:12.000Z
|
2022-03-15T19:16:16.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import sys
import time
PYVERSION = sys.version.split()[0]
if PYVERSION >= "3" or PYVERSION < "2.6":
exit("[%s] [CRITICAL] incompatible Python version detected ('%s'). To successfully run sqlmap you'll have to use version 2.6.x or 2.7.x (visit 'https://www.python.org/downloads/')" % (time.strftime("%X"), PYVERSION))
errors = []
extensions = ("bz2", "gzip", "pyexpat", "ssl", "sqlite3", "zlib")
for _ in extensions:
try:
__import__(_)
except ImportError:
errors.append(_)
if errors:
errMsg = "missing one or more core extensions (%s) " % (", ".join("'%s'" % _ for _ in errors))
errMsg += "most likely because current version of Python has been "
errMsg += "built without appropriate dev packages"
exit(errMsg)
| 31
| 220
| 0.655172
|
c045bffa95b29e069831b548701b76d1b8e76c0d
| 1,296
|
py
|
Python
|
official/modeling/activations/gelu.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/modeling/activations/gelu.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 21
|
2021-08-31T08:34:50.000Z
|
2022-03-17T11:42:10.000Z
|
official/modeling/activations/gelu.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 39
|
2021-07-02T00:46:14.000Z
|
2022-03-13T16:59:55.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian error linear unit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
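# Hedged usage sketch (added for illustration, not part of the original module):
# the tanh form above closely approximates the exact erf-based GELU, e.g.
#
#   x = tf.constant([-1.0, 0.0, 1.0])
#   approx = gelu(x)
#   exact = 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
#
# with the maximum absolute difference on the order of 1e-3 or smaller.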
| 31.609756
| 80
| 0.686728
|
384e0e43bf1233c9f1606bd55419da3a5c214d9f
| 10,139
|
py
|
Python
|
Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/grove_i2c_barometic_sensor_BMP180.py
|
joemarshall/GrovePi
|
d128ab734f216f5b2d82c1b629cfca3f31af4993
|
[
"MIT"
] | 7
|
2015-03-23T20:30:58.000Z
|
2018-04-25T07:48:46.000Z
|
Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/grove_i2c_barometic_sensor_BMP180.py
|
joemarshall/GrovePi
|
d128ab734f216f5b2d82c1b629cfca3f31af4993
|
[
"MIT"
] | 4
|
2015-02-07T17:37:58.000Z
|
2017-07-30T10:14:29.000Z
|
Software/Python/grove_barometer_sensors/barometric_sensor_bmp180/grove_i2c_barometic_sensor_BMP180.py
|
joemarshall/GrovePi
|
d128ab734f216f5b2d82c1b629cfca3f31af4993
|
[
"MIT"
] | 7
|
2015-08-29T13:52:42.000Z
|
2021-12-29T15:10:19.000Z
|
#!/usr/bin/python
# Copyright 2014 Johan Vandewalle. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from Adafruit_I2C import Adafruit_I2C
import math
# ===========================================================================
# BMP085 Class
# ===========================================================================
class BMP085 :
i2c = None
# Operating Modes
__BMP085_ULTRALOWPOWER = 0
__BMP085_STANDARD = 1
__BMP085_HIGHRES = 2
__BMP085_ULTRAHIGHRES = 3
# BMP085 Registers
__BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits)
__BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits)
__BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits)
__BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits)
__BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits)
__BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits)
__BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits)
__BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits)
__BMP085_CAL_MB = 0xBA # R Calibration data (16 bits)
__BMP085_CAL_MC = 0xBC # R Calibration data (16 bits)
__BMP085_CAL_MD = 0xBE # R Calibration data (16 bits)
__BMP085_CONTROL = 0xF4
__BMP085_TEMPDATA = 0xF6
__BMP085_PRESSUREDATA = 0xF6
__BMP085_READTEMPCMD = 0x2E
__BMP085_READPRESSURECMD = 0x34
# Private Fields
_cal_AC1 = 0
_cal_AC2 = 0
_cal_AC3 = 0
_cal_AC4 = 0
_cal_AC5 = 0
_cal_AC6 = 0
_cal_B1 = 0
_cal_B2 = 0
_cal_MB = 0
_cal_MC = 0
_cal_MD = 0
# Constructor
def __init__(self, address=0x77, mode=1, debug=False):
self.i2c = Adafruit_I2C(address)
self.address = address
self.debug = debug
# Make sure the specified mode is in the appropriate range
if ((mode < 0) | (mode > 3)):
if (self.debug):
print("Invalid Mode: Using STANDARD by default")
self.mode = self.__BMP085_STANDARD
else:
self.mode = mode
# Read the calibration data
self.readCalibrationData()
def readS16(self, register):
"Reads a signed 16-bit value"
hi = self.i2c.readS8(register)
lo = self.i2c.readU8(register+1)
return (hi << 8) + lo
def readU16(self, register):
"Reads an unsigned 16-bit value"
hi = self.i2c.readU8(register)
lo = self.i2c.readU8(register+1)
return (hi << 8) + lo
def readCalibrationData(self):
"Reads the calibration data from the IC"
self._cal_AC1 = self.readS16(self.__BMP085_CAL_AC1) # INT16
self._cal_AC2 = self.readS16(self.__BMP085_CAL_AC2) # INT16
self._cal_AC3 = self.readS16(self.__BMP085_CAL_AC3) # INT16
self._cal_AC4 = self.readU16(self.__BMP085_CAL_AC4) # UINT16
self._cal_AC5 = self.readU16(self.__BMP085_CAL_AC5) # UINT16
self._cal_AC6 = self.readU16(self.__BMP085_CAL_AC6) # UINT16
self._cal_B1 = self.readS16(self.__BMP085_CAL_B1) # INT16
self._cal_B2 = self.readS16(self.__BMP085_CAL_B2) # INT16
self._cal_MB = self.readS16(self.__BMP085_CAL_MB) # INT16
self._cal_MC = self.readS16(self.__BMP085_CAL_MC) # INT16
self._cal_MD = self.readS16(self.__BMP085_CAL_MD) # INT16
if (self.debug):
self.showCalibrationData()
def showCalibrationData(self):
"Displays the calibration values for debugging purposes"
print("DBG: AC1 = %6d" % (self._cal_AC1))
print("DBG: AC2 = %6d" % (self._cal_AC2))
print("DBG: AC3 = %6d" % (self._cal_AC3))
print("DBG: AC4 = %6d" % (self._cal_AC4))
print("DBG: AC5 = %6d" % (self._cal_AC5))
print("DBG: AC6 = %6d" % (self._cal_AC6))
print("DBG: B1 = %6d" % (self._cal_B1))
print("DBG: B2 = %6d" % (self._cal_B2))
print("DBG: MB = %6d" % (self._cal_MB))
print("DBG: MC = %6d" % (self._cal_MC))
print("DBG: MD = %6d" % (self._cal_MD))
def readRawTemp(self):
"Reads the raw (uncompensated) temperature from the sensor"
self.i2c.write8(self.__BMP085_CONTROL, self.__BMP085_READTEMPCMD)
time.sleep(0.005) # Wait 5ms
raw = self.readU16(self.__BMP085_TEMPDATA)
if (self.debug):
print("DBG: Raw Temp: 0x%04X (%d)" % (raw & 0xFFFF, raw))
return raw
def readRawPressure(self):
"Reads the raw (uncompensated) pressure level from the sensor"
self.i2c.write8(self.__BMP085_CONTROL, self.__BMP085_READPRESSURECMD + (self.mode << 6))
if (self.mode == self.__BMP085_ULTRALOWPOWER):
time.sleep(0.005)
elif (self.mode == self.__BMP085_HIGHRES):
time.sleep(0.014)
elif (self.mode == self.__BMP085_ULTRAHIGHRES):
time.sleep(0.026)
else:
time.sleep(0.008)
msb = self.i2c.readU8(self.__BMP085_PRESSUREDATA)
lsb = self.i2c.readU8(self.__BMP085_PRESSUREDATA+1)
xlsb = self.i2c.readU8(self.__BMP085_PRESSUREDATA+2)
raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self.mode)
if (self.debug):
print("DBG: Raw Pressure: 0x%04X (%d)" % (raw & 0xFFFF, raw))
return raw
  def readTemperature(self):
    "Gets the compensated temperature in degrees celsius"
UT = 0
X1 = 0
X2 = 0
B5 = 0
temp = 0.0
# Read raw temp before aligning it with the calibration values
UT = self.readRawTemp()
X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
B5 = X1 + X2
temp = ((B5 + 8) >> 4) / 10.0
if (self.debug):
print("DBG: Calibrated temperature = %f C" % temp)
return temp
def readPressure(self):
"Gets the compensated pressure in pascal"
UT = 0
UP = 0
B3 = 0
B5 = 0
B6 = 0
X1 = 0
X2 = 0
X3 = 0
p = 0
B4 = 0
B7 = 0
UT = self.readRawTemp()
UP = self.readRawPressure()
# You can use the datasheet values to test the conversion results
# dsValues = True
dsValues = False
if (dsValues):
UT = 27898
UP = 23843
self._cal_AC6 = 23153
self._cal_AC5 = 32757
      self._cal_MB = -32768
self._cal_MC = -8711
self._cal_MD = 2868
self._cal_B1 = 6190
self._cal_B2 = 4
self._cal_AC3 = -14383
self._cal_AC2 = -72
self._cal_AC1 = 408
self._cal_AC4 = 32741
self.mode = self.__BMP085_ULTRALOWPOWER
if (self.debug):
self.showCalibrationData()
# True Temperature Calculations
X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
B5 = X1 + X2
if (self.debug):
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: B5 = %d" % (B5))
print("DBG: True Temperature = %.2f C" % (((B5 + 8) >> 4) / 10.0))
# Pressure Calculations
B6 = B5 - 4000
X1 = (self._cal_B2 * (B6 * B6) >> 12) >> 11
X2 = (self._cal_AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((self._cal_AC1 * 4 + X3) << self.mode) + 2) / 4
if (self.debug):
print("DBG: B6 = %d" % (B6))
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: X3 = %d" % (X3))
print("DBG: B3 = %d" % (B3))
X1 = (self._cal_AC3 * B6) >> 13
X2 = (self._cal_B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) >> 2
B4 = (self._cal_AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> self.mode)
if (self.debug):
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: X3 = %d" % (X3))
print("DBG: B4 = %d" % (B4))
print("DBG: B7 = %d" % (B7))
if (B7 < 0x80000000):
p = (B7 * 2) / B4
else:
p = (B7 / B4) * 2
if (self.debug):
print("DBG: X1 = %d" % (X1))
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
if (self.debug):
print("DBG: p = %d" % (p))
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
p = p + ((X1 + X2 + 3791) >> 4)
if (self.debug):
print("DBG: Pressure = %d Pa" % (p))
return p
def readAltitude(self, seaLevelPressure=101325):
"Calculates the altitude in meters"
altitude = 0.0
pressure = float(self.readPressure())
temperature = float(self.readTemperature())
# altitude = 44330.0 * (1.0 - pow(pressure / seaLevelPressure, 0.1903))
altitude = round( -math.log( pressure / seaLevelPressure ) * 8314 * ( temperature + 273.15 ) / ( 25 * 9.81 ) , 2 )
    # this isn't completely correct. The formula uses the temperature of the sensor while it should be using the temperature
    # at sea level. At lower altitudes and close (less than 200 km) to the shore, the difference is negligible. If you want
    # to use the script at higher locations or deeper inland, comment this line and uncomment the line above.
if (self.debug):
print("DBG: Altitude = %.2f m" % (altitude))
return altitude
return 0
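# Hedged usage sketch (not part of the original driver): with the sensor wired
# to I2C address 0x77 one would typically do something like
#
#   bmp = BMP085(address=0x77, mode=1, debug=False)
#   print("Temperature: %.2f C" % bmp.readTemperature())
#   print("Pressure: %.2f hPa" % (bmp.readPressure() / 100.0))
#   print("Altitude: %.2f m" % bmp.readAltitude())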
| 35.083045
| 126
| 0.606569
|
17bcb4be46c269900cac772c45adcfd0b047ac5c
| 8,459
|
py
|
Python
|
docs/sphinx_extensions/codeinclude/codeinclude.py
|
siggisim/tecl
|
2a6c58ebf0ee7f9cb28e5e8f8b7970502a35c00d
|
[
"Apache-2.0"
] | null | null | null |
docs/sphinx_extensions/codeinclude/codeinclude.py
|
siggisim/tecl
|
2a6c58ebf0ee7f9cb28e5e8f8b7970502a35c00d
|
[
"Apache-2.0"
] | null | null | null |
docs/sphinx_extensions/codeinclude/codeinclude.py
|
siggisim/tecl
|
2a6c58ebf0ee7f9cb28e5e8f8b7970502a35c00d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Tulip Solutions B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of this file is taken from the original implementation of the 'literalinclude' directive included in Sphinx core;
# file sphinx/directives/code.py.
from docutils import nodes
from docutils.parsers.rst import directives
from six import text_type
from sphinx.directives.code import (
dedent_lines,
LiteralInclude,
LiteralIncludeReader
)
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.nodes import set_source_info
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
logger = logging.getLogger(__name__)
def caption_wrapper(literal_node, caption):
# type: (nodes.Node, unicode) -> nodes.container
container_node = nodes.container('', literal_block=True,
classes=['literal-block-wrapper'])
container_node += nodes.strong(caption, caption)
container_node += literal_node
return container_node
def get_min_indent_nonempty_line(lines):
    min_indent = -1
    for raw_line in lines:
        stripped_line = raw_line.strip()
        if not stripped_line:
            continue
        lstripped_line = raw_line.lstrip()
        num_chars_lstripped = len(raw_line) - len(lstripped_line)
        if num_chars_lstripped < min_indent or min_indent == -1:
            min_indent = num_chars_lstripped
    if min_indent == -1:
        min_indent = 0
    return min_indent
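# Worked example (added for clarity): for lines ["    a\n", "  b\n", "\n"] the
# blank line is skipped and the minimum indent of a non-empty line is 2.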
class CodeIncludeReader(LiteralIncludeReader):
begin_marker_prefix = " CODEINCLUDE-BEGIN-MARKER: "
end_marker_prefix = " CODEINCLUDE-END-MARKER: "
def __init__(self, filename, options, config):
# type: (unicode, Dict, Config) -> None
self.filename = filename
self.options = options
self.encoding = options.get('encoding', config.source_encoding)
# Not calling superclass __init__ on purpose.
def read(self, location=None):
# type: (Any) -> Tuple[unicode, int]
filters = [self.markerid_filter,
self.autodedent_filter]
lines = self.read_file(self.filename, location=location)
for func in filters:
lines = func(lines, location=location)
return ''.join(lines), len(lines)
def autodedent_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
if 'no-auto-dedent' in self.options:
return lines
else:
dedent_level = get_min_indent_nonempty_line(lines)
logger.debug(__('autodedent: %d' % dedent_level), location=location)
return dedent_lines(lines, dedent_level, location=location)
def markerid_filter(self, lines, location=None):
# type: (List[unicode], Any) -> List[unicode]
if 'marker-id' in self.options:
marker_str = self.options['marker-id'].strip()
begin_str = self.begin_marker_prefix + marker_str
end_str = self.end_marker_prefix + marker_str
return_lines = []
within_boundary = False
for lineno, line in enumerate(lines):
if line.rstrip().endswith(begin_str):
if within_boundary:
logger.warning(__('Repetitive begin-marker for marker-id \'%s\' on line %d'
% (marker_str, lineno + 1)), location=location)
within_boundary = True
elif line.rstrip().endswith(end_str):
if not within_boundary:
logger.warning(__('End-marker for marker-id \'%s\' on line %d, without having seen the '
'begin-marker' % (marker_str, lineno + 1)), location=location)
within_boundary = False
elif self.begin_marker_prefix in line or self.end_marker_prefix in line:
# Skip lines with other markers.
logger.debug(
__('Skipping line number %d with non-matching marker' % (lineno + 1)),
location=location
)
elif within_boundary:
return_lines.append(line)
if not return_lines:
logger.warning(__('No matching lines for marker \'%s\'' % marker_str), location=location)
return return_lines
else:
logger.info(__('marker-id not provided; outputting all lines in file'), location=location)
return lines
class CodeInclude(LiteralInclude):
"""
Like ``.. literalinclude:: file.ext``, but using markers instead of line offsets, auto-dedent and auto-language
features.
    Markers are composed of a prefix and an ID, matched as a literal string in the source files. They should be on a
    line by themselves, because marker lines are omitted from the output. The marker prefix should not otherwise occur
    in the source file, since lines containing markers that do not match the given marker-id are omitted as well.
    Markers with the same ID can occur multiple times; the matched ranges are simply concatenated in the output.
The language is automatically detected by looking at the file extension. Recognized are those in the
``auto_language_map`` class member dict. To override, use the ``:language:`` field.
Code is automatically dedented (by its minimum indent on a non-empty line) - to turn that off, use the
``:no-auto-dedent:`` field (flag).
"""
option_spec = {
'no-auto-dedent': directives.flag,
'marker-id': directives.unchanged,
'language': directives.unchanged,
'encoding': directives.encoding,
'tab-width': int,
'class': directives.class_option,
'caption': directives.unchanged,
}
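    # Hedged usage sketch (assumed, not taken from the project's documentation):
    # in an .rst file one might write
    #
    #   .. codeinclude:: ../../examples/place_order.py
    #      :marker-id: create-order
    #      :caption: Creating an order
    #
    # and bracket the region in place_order.py with comment lines ending in
    # " CODEINCLUDE-BEGIN-MARKER: create-order" and
    # " CODEINCLUDE-END-MARKER: create-order"; the marker lines themselves are
    # omitted from the rendered output.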
auto_language_map = {
'.py': 'python',
'.bazel': 'python',
'.bzl': 'python',
'.java': 'java',
'.go': 'go',
'.js': 'js',
}
def run(self):
# type: () -> List[nodes.Node]
document = self.state.document
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
try:
location = self.state_machine.get_source_and_line(self.lineno)
rel_filename, filename = self.env.relfn2path(self.arguments[0])
self.env.note_dependency(rel_filename)
reader = CodeIncludeReader(filename, self.options, self.config)
text, lines = reader.read(location=location)
retnode = nodes.literal_block(text, text, source=filename)
set_source_info(self, retnode)
if 'language' in self.options:
retnode['language'] = self.options['language']
else:
for ext, lang in self.auto_language_map.items():
if filename.endswith(ext):
retnode['language'] = lang
break
retnode['classes'] += self.options.get('class', [])
if 'caption' in self.options:
caption = self.options['caption']
retnode = caption_wrapper(retnode, caption)
# retnode will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
self.add_name(retnode)
return [retnode]
except Exception as exc:
return [document.reporter.warning(text_type(exc), line=self.lineno)]
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
directives.register_directive('codeinclude', CodeInclude)
return {
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 39.713615
| 120
| 0.623596
|
fb1f7741745e6654952bcd2035facb84d4b60723
| 971
|
py
|
Python
|
3rdparty/openmm/wrappers/python/tests/TestBytes.py
|
merkys/MMB
|
0531385b8367405e1188e31c3eef7aa4cc50170b
|
[
"MIT"
] | 5
|
2020-07-31T17:33:03.000Z
|
2022-01-01T19:24:37.000Z
|
3rdparty/openmm/wrappers/python/tests/TestBytes.py
|
merkys/MMB
|
0531385b8367405e1188e31c3eef7aa4cc50170b
|
[
"MIT"
] | 11
|
2020-06-16T05:05:42.000Z
|
2022-03-30T09:59:14.000Z
|
3rdparty/openmm/wrappers/python/tests/TestBytes.py
|
merkys/MMB
|
0531385b8367405e1188e31c3eef7aa4cc50170b
|
[
"MIT"
] | 9
|
2020-01-24T12:02:37.000Z
|
2020-10-16T06:23:56.000Z
|
import unittest
import simtk.openmm as mm
class TestBytes(unittest.TestCase):
def test_createCheckpoint(self):
system = mm.System()
system.addParticle(1.0)
refPositions = [(0,0,0)]
platform = mm.Platform.getPlatformByName('Reference')
context = mm.Context(system, mm.VerletIntegrator(0), platform)
context.setPositions(refPositions)
chk = context.createCheckpoint()
# check that the return value of createCheckpoint is of type bytes (non-unicode)
assert isinstance(chk, bytes)
# set the positions to something random then reload the checkpoint, and
# make sure that the positions get restored correctly
context.setPositions([(12345, 12345, 123451)])
context.loadCheckpoint(chk)
newPositions = context.getState(getPositions=True).getPositions()._value
assert newPositions == refPositions
if __name__ == '__main__':
unittest.main()
| 31.322581
| 88
| 0.680742
|
4bc9f733a39b200aefba18d8af7226b2b7db0d0d
| 7,769
|
py
|
Python
|
myGUI.py
|
developedby/PyAPT
|
633ea6c6b4a84c6df18256b89801f3d6cb47a92b
|
[
"MIT"
] | null | null | null |
myGUI.py
|
developedby/PyAPT
|
633ea6c6b4a84c6df18256b89801f3d6cb47a92b
|
[
"MIT"
] | null | null | null |
myGUI.py
|
developedby/PyAPT
|
633ea6c6b4a84c6df18256b89801f3d6cb47a92b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
APT Motor Controller GUI for Thorlabs
V1.0
20150417 V1.0 First working version
Michael Leung
mcleung@stanford.edu
"""
#Title: OpenFFOCT
version='1.0'
#Date: April 17, 2015
#Python Version 2.7.9
import os
import platform
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
import struct
import time
# import multiprocessing
from pyapt import APTMotor
# TODO: Implement multiprocess and OpenCL
#MULTIPROCESS = False
#USEOCL = False
class MainWindow(QMainWindow):
def __init__(self, parent=None, verbose=False):
"""
Main window
"""
self.setWindowTitle("OpenFFOCT "+version)
#Set central widget - Main Scan window
self.mainWindow = MainScanWindow(self.dataObject)
self.setCentralWidget(self.mainWindow)
#Set main toolbar
mainToolBar = self.addToolBar("Tools")
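        # Note added for clarity: this class looks like a leftover skeleton from
        # the OpenFFOCT application; __init__ never calls the QMainWindow
        # initializer, and self.dataObject (used above) and self.mcuWidget (used
        # in closeEvent) are not defined anywhere in this module, so only
        # widgetAPT below is usable on its own.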
def closeEvent(self, event):
"""
Clean up child widgets before exit
1) Close the active COM port before application exit
2) Kill the child process if MULTIPROCESS is True
"""
self.mcuWidget.MCU.close()
self.mainWindow.dataObject.terminate()
event.accept()
class widgetAPT(QWidget):
def __init__(self, parent = None, serial=00000000, verbose=False):
super(widgetAPT, self).__init__(parent)
self.resize(200, 100)
#self.move(100, 100)
#setGeometry sets both location and size
#self.setGeometry(50, 50, 1180, 900)
self.setWindowTitle('APT Motor')
#self.m = APTMotor(0)
# QT GridLayout
# TODO: Implement GridLayout
#grid = QGridLayout()
# Layout objects
sAuthor = QLabel("QT-APT", self)
sAuthor.resize(100, 20)
sAuthor.move(100, 0)
sAuthor.setAlignment(Qt.AlignRight)
sVersion = QLabel("v1.0.0", self)
sVersion.resize(100, 20)
sVersion.move(100, 15)
sVersion.setAlignment(Qt.AlignRight)
sEmail = QLabel("Michael Leung", self)
sEmail.resize(100, 40)
sEmail.move(100, 30)
sEmail.setAlignment(Qt.AlignRight)
# Motor Serial Number
sSer = QLabel("Serial:", self)
sSer.resize(60, 20)
sSer.move(0, 0)
self.txtSerial = QSpinBox(self)
self.txtSerial.resize(70,20)
self.txtSerial.move(30,0)
self.txtSerial.setRange(0, 99999999)
self.txtSerial.setSingleStep(1)
self.txtSerial.setValue(83840946)
# qle.textChanged[str].connect(self.onChanged) #do onChanged when changed
self._Motor_ = APTMotor(verbose=verbose)
# Motor Connect
self.btnConnect = QPushButton("Connect", self)
self.btnConnect.setStyleSheet("background-color: grey")
self.btnConnect.setText("Connect")
self.btnConnect.setCheckable(True)
self.btnConnect.setToolTip("Connect to Motor")
self.btnConnect.resize(50, 20)
self.btnConnect.move(105, 0)
self.btnConnect.clicked[bool].connect(self.connectAPT)
sPos = QLabel("Pos:", self)
sPos.resize(70, 20)
sPos.move(0, 25)
self.txtPos = QDoubleSpinBox(self)
self.txtPos.resize(60, 20)
self.txtPos.move(30, 25)
#self.txtPos.setMaxLength(7)
self.txtPos.setRange(0, 20)
self.txtPos.setSingleStep(.1)
self.txtPos.setDecimals(5)
self.txtPos.setValue(0.0000000)
self.txtPos.setToolTip("Current Motor Position")
#self.txtPos.setValidator( QDoubleValidator(0, 100, 2) )
self.txtPos.setEnabled(False)
# Go to position
btnGOp = QPushButton("Go", self)
btnGOp.resize(25, 20)
btnGOp.move(100, 25)
btnGOp.clicked.connect(lambda: self.motAbs(float(self.txtPos.text())))
# Movement buttons
btnN3 = QPushButton("-100", self)
btnN3.resize(32, 20)
btnN3.move(0, 50)
btnN3.clicked.connect(lambda: self.motRel(-.1))
btnN2 = QPushButton("-10", self)
btnN2.resize(32, 20)
btnN2.move(33, 50)
btnN2.clicked.connect(lambda: self.motRel(-.01))
btnN1 = QPushButton("-1", self)
btnN1.resize(32, 20)
btnN1.move(66, 50)
btnN1.clicked.connect(lambda: self.motRel(-.001))
btnP1 = QPushButton("+1", self)
btnP1.resize(32, 20)
btnP1.move(100, 50)
btnP1.clicked.connect(lambda: self.motRel(.001))
btnP2 = QPushButton("+10", self)
btnP2.resize(32, 20)
btnP2.move(133, 50)
btnP2.clicked.connect(lambda: self.motRel(.01))
btnP3 = QPushButton("+100", self)
btnP3.resize(32, 20)
btnP3.move(166, 50)
btnP3.clicked.connect(lambda: self.motRel(.1))
sVel = QLabel("Vel:", self)
sVel.resize(60, 20)
sVel.move(0, 75)
self.txtVel = QDoubleSpinBox(self)
self.txtVel.resize(60, 20)
self.txtVel.move(30, 75)
#self.txtVel.setMaxLength(7)
self.txtVel.setRange(0, 2.2)
self.txtVel.setSingleStep(.1)
self.txtVel.setValue(0.000)
self.txtVel.setToolTip("Current Motor Position")
self.txtVel.setEnabled(False)
# Go to velocity
btnGOv = QPushButton("Go", self)
btnGOv.resize(25, 20)
btnGOv.move(100, 75)
btnGOv.clicked.connect(lambda: self._Motor_.setVel(float(self.txtVel.text())))
sBack = QLabel("Backlash:", self)
sBack.resize(60, 20)
sBack.move(130, 75)
self.cbBacklash = QCheckBox(self)
self.cbBacklash.resize(60, 20)
self.cbBacklash.move(180, 75)
self.show()
def connectAPT(self, pressed):
if pressed:
#APT Motor connect
Serial = int(self.txtSerial.text())
self._Motor_.setSerialNumber(Serial)
self._Motor_.initializeHardwareDevice()
# Success
self.btnConnect.setStyleSheet("background-color: green")
self.btnConnect.setText("Connected")
self.txtSerial.setEnabled(False)
self.txtPos.setEnabled(True)
# Update text to show position
self.txtPos.setValue( self._Motor_.getPos() )
self.txtVel.setEnabled(True)
_, _, maxVel = self._Motor_.getVelocityParameters()
self.txtVel.setValue( maxVel )
return True
else:
#APT Motor disconnect
self._Motor_.cleanUpAPT()
# Success
self.btnConnect.setStyleSheet("background-color: grey")
self.btnConnect.setText("Connect")
self.txtSerial.setEnabled(True)
self.txtPos.setEnabled(False)
self.txtVel.setEnabled(False)
self.txtPos.setValue(0.0000)
self.txtPos.setToolTip("Current Motor Position")
return True
def motRel(self, relDistance):
if self.cbBacklash.isChecked() :
self._Motor_.mbRel(relDistance)
else:
self._Motor_.mRel(relDistance)
# Update text to show position
self.txtPos.setValue( self._Motor_.getPos() )
def motAbs(self, absDistance):
if self.cbBacklash.isChecked() :
self._Motor_.mbAbs(absDistance)
else:
self._Motor_.mAbs(absDistance)
# Update text to show position
self.txtPos.setValue( self._Motor_.getPos() )
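# --- Illustrative usage sketch (added; not part of the original widget) --------
# widgetAPT is normally driven through its buttons; the helper below only
# documents the programmatic path through motRel()/motAbs().  It assumes a
# widget whose "Connect" button has already been pressed (APTMotor initialized)
# and is never called by the GUI itself.
def _demo_move_sequence(apt_widget, step=0.001):
    """Nudge the stage forward and back by 'step' (in the motor's native units), then return to absolute zero."""
    apt_widget.motRel(step)
    apt_widget.motRel(-step)
    apt_widget.motAbs(0.0)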
if __name__ == '__main__':
app = QApplication(sys.argv)
#splash_pix = QPixmap('')
#splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
#splash.show()
form = widgetAPT(verbose=True)
#form.setWindowState(Qt.WindowMaximized)
#form.show()
#splash.finish(form)
sys.exit(app.exec_())
| 30.70751
| 86
| 0.61179
|
e6cfe9f1ea851c53d79102bc9b2ff8d235fe1c16
| 6,474
|
py
|
Python
|
ansible/lib/ansible/modules/extras/source_control/bzr.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/source_control/bzr.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/extras/source_control/bzr.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = u'''
---
module: bzr
author: "André Paramés (@andreparames)"
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22
'''
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
        # build a list (not a lazy filter object) so the len() check below also works on Python 3
        lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
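# --- Illustrative sketch (added; not part of the original module) --------------
# Bzr only needs an object exposing run_command() and fail_json(), so the stub
# below is an assumption used purely to show how clone()/get_version() fit
# together outside of Ansible; it is not a replacement for AnsibleModule.
class _StubModule(object):
    def run_command(self, args, cwd=None, check_rc=False, **kwargs):
        import subprocess
        if not isinstance(args, list):
            args = args.split()
        proc = subprocess.Popen(args, cwd=cwd,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if check_rc and proc.returncode != 0:
            self.fail_json(msg=err)
        return (proc.returncode, out, err)

    def fail_json(self, msg=None, **kwargs):
        raise RuntimeError(msg)

def _demo_branch(parent, dest, version='head', bzr_path='/usr/bin/bzr'):
    """Hypothetical helper: branch 'parent' into 'dest' and report its revno."""
    bzr = Bzr(_StubModule(), parent, dest, version, bzr_path)
    bzr.clone()
    return bzr.get_version()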
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
main()
| 32.37
| 96
| 0.60658
|
56724c80558b68b72234465a7dc0989dde8e0ecb
| 725
|
py
|
Python
|
space_3d.py
|
rokkian/adam_implementation
|
37cef22a15d7b25def9d451cf951b30bb76333c6
|
[
"Apache-2.0"
] | null | null | null |
space_3d.py
|
rokkian/adam_implementation
|
37cef22a15d7b25def9d451cf951b30bb76333c6
|
[
"Apache-2.0"
] | null | null | null |
space_3d.py
|
rokkian/adam_implementation
|
37cef22a15d7b25def9d451cf951b30bb76333c6
|
[
"Apache-2.0"
] | null | null | null |
from numpy import arange, meshgrid
from matplotlib import pyplot
# objective function
def objective(x, y):
    '''objective function: z = x**2 + y**2'''
    return x ** 2.0 + y ** 2.0
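# Quick sanity check (added illustration; not part of the original script): the
# objective is a paraboloid, so it is zero at the origin and grows with the
# squared distance from it.
def _check_objective():
    assert objective(0.0, 0.0) == 0.0
    assert objective(1.0, -1.0) == 2.0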
if __name__ == '__main__':
    # input range
    r_min, r_max = -1.0, 1.0
    # sample the range uniformly
    xaxis = arange(r_min, r_max, 0.1)
    yaxis = arange(r_min, r_max, 0.1)
    # build a mesh from the axes
    x, y = meshgrid(xaxis, yaxis, sparse=False)
    print(x, '\n', y)
    # compute the targets
    results = objective(x, y)
    # create the surface plot with the jet color scheme
    figure = pyplot.figure()
    axis = figure.gca(projection='3d')
    axis.plot_surface(x, y, results, cmap="jet")
    pyplot.show()
| 25.892857
| 59
| 0.633103
|
4584bf058179a3ce84640696a9865ce191c0f25a
| 13,698
|
py
|
Python
|
mmd_scripting/scripts_for_gui/check_model_compatibility.py
|
Nuthouse01/PMX-VMD-Scripting-Tools
|
21a397db92c5f8034494dfcc41c7814096d3dd50
|
[
"MIT"
] | 54
|
2020-07-15T02:51:47.000Z
|
2022-03-22T01:33:54.000Z
|
mmd_scripting/scripts_for_gui/check_model_compatibility.py
|
Nuthouse01/VMD-to-text-Conversion-Tool
|
0d9334bd5735accdd8bb6e1b69889fbe054a7481
|
[
"MIT"
] | 5
|
2020-09-05T23:22:40.000Z
|
2021-11-26T22:32:09.000Z
|
mmd_scripting/scripts_for_gui/check_model_compatibility.py
|
Nuthouse01/VMD-to-text-Conversion-Tool
|
0d9334bd5735accdd8bb6e1b69889fbe054a7481
|
[
"MIT"
] | 13
|
2020-09-05T19:06:27.000Z
|
2022-02-14T16:01:09.000Z
|
import mmd_scripting.core.nuthouse01_core as core
import mmd_scripting.core.nuthouse01_packer as pack
import mmd_scripting.core.nuthouse01_pmx_parser as pmxlib
import mmd_scripting.core.nuthouse01_vmd_parser as vmdlib
import mmd_scripting.core.nuthouse01_vmd_utils as vmdutil
import mmd_scripting.core.nuthouse01_vpd_parser as vpdlib
_SCRIPT_VERSION = "Script version: Nuthouse01 - v0.5.03 - 10/10/2020"
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
########################################################################################################################
# constants & options
########################################################################################################################
# if true, print items that match as well as items that miss
# if false, print only items that miss
PRINT_MATCHING_ITEMS = False
helptext = '''=================================================
check_model_compatibility:
This tool will check for compatibility between a given model (PMX) and a given dance motion (VMD) or pose (VPD).
This means checking whether the model supports all the bones and/or morphs the VMD/VPD is trying to use.
All bone/morph names are compared using the JP names.
This requires both a PMX model and a VMD motion to run.
'''
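# Illustrative sketch (added; not part of the original script): the morph/bone
# check in main() boils down to an exact comparison of names after encoding
# them with the same shift_jis escape scheme used for the VMD.  Only
# pack.encode_string_with_escape() is a real call from this codebase; the
# helper itself and its arguments are assumptions for demonstration, and main()
# additionally guards model names against UnicodeEncodeError.
def _name_is_supported(vmd_name: str, model_names: list) -> bool:
    target = pack.encode_string_with_escape(vmd_name)
    return any(pack.encode_string_with_escape(n) == target for n in model_names)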
def main(moreinfo=True):
# prompt PMX name
core.MY_PRINT_FUNC("Please enter name of PMX input file:")
input_filename_pmx = core.MY_FILEPROMPT_FUNC("PMX file", ".pmx")
pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
# prompt VMD file name
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Please enter name of VMD motion or VPD pose file to check compatability with:")
input_filename = core.MY_FILEPROMPT_FUNC("VMD or VPD file",(".vmd",".vpd"))
if not input_filename.lower().endswith(".vpd"):
# the actual VMD part isn't even used, only bonedict and morphdict
vmd = vmdlib.read_vmd(input_filename, moreinfo=moreinfo)
else:
vmd = vpdlib.read_vpd(input_filename, moreinfo=moreinfo)
bonedict = vmdutil.parse_vmd_used_dict(vmd.boneframes, moreinfo=moreinfo)
morphdict = vmdutil.parse_vmd_used_dict(vmd.morphframes, moreinfo=moreinfo)
core.MY_PRINT_FUNC("")
# must use same encoding as I used when the VMD was unpacked, since the hex bytes only have meaning in that encoding
pack.set_encoding("shift_jis")
##############################################
    # check morph compatibility
# build list of morphs used in the dance VMD
morphs_in_vmd = list(morphdict.keys())
# build list of ALL morphs in the PMX
morphs_in_model = [pmxmorph.name_jp for pmxmorph in pmx.morphs]
# ensure that the VMD contains at least some morphs, to prevent zero-divide error
if len(morphs_in_vmd) == 0:
core.MY_PRINT_FUNC("MORPH SKIP: VMD '%s' does not contain any morphs that are used in a meaningful way." % core.filepath_splitdir(input_filename)[1])
elif len(morphs_in_model) == 0:
core.MY_PRINT_FUNC("MORPH SKIP: PMX '%s' does not contain any morphs." % core.filepath_splitdir(input_filename_pmx)[1])
else:
# convert pmx-morph names to bytes
# these can plausibly fail shift_jis encoding because they came from the UTF-8 pmx file
morphs_in_model_b = []
for a in morphs_in_model:
try:
b = pack.encode_string_with_escape(a)
except UnicodeEncodeError as e:
newerrstr = "%s: '%s' codec cannot encode char '%s' within string '%s'" % (
e.__class__.__name__, e.encoding, e.reason[e.start:e.end], e.reason)
core.MY_PRINT_FUNC(newerrstr)
b = bytearray()
morphs_in_model_b.append(b)
# convert vmd-morph names to bytes
# these might be truncated but cannot fail because they were already decoded from the shift_jis vmd file
morphs_in_vmd_b = [pack.encode_string_with_escape(a) for a in morphs_in_vmd]
matching_morphs = {}
missing_morphs = {}
# iterate over list of morphs
for vmdmorph, vmdmorph_b in zip(morphs_in_vmd, morphs_in_vmd_b):
# question: does "vmdmorph" match something in "morphs_in_model"?
# BUT, doing comparison in bytes-space to handle escape characters: vmdmorph_b vs morphs_in_model_b
# NOTE: MMD does not try to use "begins-with" matching like I had hoped/assumed, it only looks for exact matches
# return list of ALL matches, this way i can raise an error if there are multiple matches
# exact match
modelmorphmatch_b = [a for a in morphs_in_model_b if a == vmdmorph_b]
# copy the key,val in one of the dicts depending on results of matching attempt
if len(modelmorphmatch_b) == 0:
                # MISS! key is the VMD morph name since that's the best clue I've got
missing_morphs[vmdmorph] = morphdict[vmdmorph]
elif len(modelmorphmatch_b) == 1:
                # MATCH! key is the PMX morph name it matched against, since it might be a longer version without escape char
matching_morphs[pack.decode_bytes_with_escape(modelmorphmatch_b[0])] = morphdict[vmdmorph]
else:
# more than 1 morph was a match!?
core.MY_PRINT_FUNC("Warning: VMDmorph '%s' matched multiple PMXmorphs, its behavior is uncertain." % vmdmorph)
modelmorphmatch = [pack.decode_bytes_with_escape(a) for a in modelmorphmatch_b]
# core.MY_PRINT_FUNC(modelmorphmatch)
matching_morphs[modelmorphmatch[0]] = morphdict[vmdmorph]
# display results!
r = "PASS" if len(matching_morphs) == len(morphs_in_vmd) else "FAIL"
core.MY_PRINT_FUNC("MORPH {}: {} / {} = {:.1%} of the morphs are supported".format(
r, len(matching_morphs), len(morphs_in_vmd), len(matching_morphs) / len(morphs_in_vmd)))
# if there are no missing morphs (all match), don't print anything at all
if missing_morphs:
if not moreinfo:
core.MY_PRINT_FUNC("For detailed list, please re-run with 'more info' enabled")
else:
# convert the dicts to lists and sort for printing
# sort in-place descending by 2nd element as primary
missing_morphs_list = sorted(list(missing_morphs.items()), key=core.get2nd, reverse=True)
# justify the names!
missing_just = core.MY_JUSTIFY_STRINGLIST(["'" + m[0] + "'" for m in missing_morphs_list])
# re-attach the justified names to the usage numbers
missing_morphs_list = list(zip(missing_just, [m[1] for m in missing_morphs_list]))
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Unsupported morphs: name + times used")
for m, num in missing_morphs_list:
core.MY_PRINT_FUNC(" %s || %d" % (m, int(num)))
# only print the matching morphs if there are some, and if enabled by options
if matching_morphs and PRINT_MATCHING_ITEMS:
matching_morphs_list = list(matching_morphs.items())
matching_morphs_list.sort(key=core.get2nd, reverse=True) # sort in-place descending by 2nd element as primary
matching_just = core.MY_JUSTIFY_STRINGLIST(["'" + m[0] + "'" for m in matching_morphs_list])
matching_morphs_list = list(zip(matching_just, [m[1] for m in matching_morphs_list]))
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Supported morphs: name + times used")
for m, num in matching_morphs_list:
core.MY_PRINT_FUNC(" %s || %d" % (m, int(num)))
##############################################
    # check bone compatibility
core.MY_PRINT_FUNC("")
# build list of bones used in the dance VMD
bones_in_vmd = list(bonedict.keys())
# build list of ALL bones in the PMX
# first item of pmxbone is the jp name
bones_in_model = [pmxbone.name_jp for pmxbone in pmx.bones]
# ensure that the VMD contains at least some bones, to prevent zero-divide error
if len(bones_in_vmd) == 0:
core.MY_PRINT_FUNC("BONE SKIP: VMD '%s' does not contain any bones that are used in a meaningful way." % core.filepath_splitdir(input_filename)[1])
elif len(bones_in_model) == 0:
core.MY_PRINT_FUNC("BONE SKIP: PMX '%s' does not contain any bones." % core.filepath_splitdir(input_filename_pmx)[1])
else:
# convert pmx-bone names to bytes
# these can plausibly fail shift_jis encoding because they came from the UTF-8 pmx file
bones_in_model_b = []
for a in bones_in_model:
try:
b = pack.encode_string_with_escape(a)
except UnicodeEncodeError as e:
newerrstr = "%s: '%s' codec cannot encode char '%s' within string '%s'" % (
e.__class__.__name__, e.encoding, e.reason[e.start:e.end], e.reason)
core.MY_PRINT_FUNC(newerrstr)
b = bytearray()
bones_in_model_b.append(b)
# convert vmd-bone names to bytes
# these might be truncated but cannot fail because they were already decoded from the shift_jis vmd file
bones_in_vmd_b = [pack.encode_string_with_escape(a) for a in bones_in_vmd]
matching_bones = {}
missing_bones = {}
# iterate over list of bones that pass the size check
for vmdbone, vmdbone_b in zip(bones_in_vmd, bones_in_vmd_b):
# question: does "vmdbone" match something in "bones_in_model"?
# BUT, doing comparison in bytes-space to handle escape characters: vmdbone_b vs bones_in_model_b
# NOTE: MMD does not try to use "begins-with" matching like I had hoped/assumed, it only looks for exact matches
# return list of ALL matches, this way i can raise an error if there are multiple matches
# exact match
modelbonematch_b = [a for a in bones_in_model_b if a == vmdbone_b]
# copy the key,val in one of the dicts depending on results of matching attempt
if len(modelbonematch_b) == 0:
                # MISS! key is the VMD bone name since that's the best clue I've got
missing_bones[vmdbone] = bonedict[vmdbone]
elif len(modelbonematch_b) == 1:
                # MATCH! key is the PMX bone name it matched against, since it might be a longer version without escape char
matching_bones[pack.decode_bytes_with_escape(modelbonematch_b[0])] = bonedict[vmdbone]
else:
# more than 1 bone was a match!?
core.MY_PRINT_FUNC("Warning: VMDbone '%s' matched multiple PMXbones, its behavior is uncertain." % vmdbone)
modelbonematch = [pack.decode_bytes_with_escape(a) for a in modelbonematch_b]
# core.MY_PRINT_FUNC(modelbonematch)
matching_bones[modelbonematch[0]] = bonedict[vmdbone]
# display results!
if len(matching_bones) == len(bones_in_vmd):
core.MY_PRINT_FUNC("BONE PASS: {} / {} = {:.1%} of the bones are supported".format(
len(matching_bones), len(bones_in_vmd), len(matching_bones) / len(bones_in_vmd)))
else:
core.MY_PRINT_FUNC("BONE FAIL: {} / {} = {:.1%} of the bones are supported".format(
len(matching_bones), len(bones_in_vmd), len(matching_bones) / len(bones_in_vmd)))
core.MY_PRINT_FUNC("(Note: it is common for a motion to be saved with a frame at t=0 for EVERY bone, including the physics bones. These physics bones don't obey those keyframes, however. So if you see many many unsupported bones with exactly one keyframe each, then it's probably a false positive, and your model doesn't actually need to support those bones.)")
# if there are no missing bones (all match), don't print anything at all
if missing_bones:
if not moreinfo:
core.MY_PRINT_FUNC("For detailed list, please re-run with 'more info' enabled")
else:
# convert the dicts to lists and sort for printing
# sort in-place descending by 2nd element as primary
missing_bones_list = sorted(list(missing_bones.items()), key=core.get2nd, reverse=True)
# justify the names!
missing_just = core.MY_JUSTIFY_STRINGLIST(["'" + m[0] + "'" for m in missing_bones_list])
# re-attach the justified names to the usage numbers
missing_bones_list = list(zip(missing_just, [m[1] for m in missing_bones_list]))
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Unsupported bones: name + times used")
for m, num in missing_bones_list:
core.MY_PRINT_FUNC(" %s || %d" % (m, int(num)))
# only print the matching bones if there are some, and if enabled by options
if matching_bones and PRINT_MATCHING_ITEMS:
matching_bones_list = list(matching_bones.items())
matching_bones_list.sort(key=core.get2nd, reverse=True) # sort in-place descending by 2nd element as primary
matching_just = core.MY_JUSTIFY_STRINGLIST(["'" + m[0] + "'" for m in matching_bones_list])
matching_bones_list = list(zip(matching_just, [m[1] for m in matching_bones_list]))
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Supported bones: name + times used")
for m, num in matching_bones_list:
core.MY_PRINT_FUNC(" %s || %d" % (m, int(num)))
# NEW: among matching bones, check whether any bones have unsupported translation/rotation
for bonestr in sorted(list(matching_bones.keys())):
# get the bone to get whether rot/trans enabled
bone = core.my_list_search(pmx.bones, lambda x: x.name_jp == bonestr, getitem=True)
# get all the frames from the VMD that are relevant to this bone
thisboneframes = [f for f in vmd.boneframes if f.name == bonestr]
# does the VMD use rotation? probably, check anyway
vmd_use_rot = any(f.rot != [0,0,0] for f in thisboneframes)
if vmd_use_rot and not (bone.has_rotate and bone.has_enabled):
# raise some sort of warning
w = "Warning: supported bone '%s' uses rotation in VMD, but rotation not allowed by PMX" % bonestr
core.MY_PRINT_FUNC(w)
# does the VMD use translation?
vmd_use_trans = any(f.pos != [0,0,0] for f in thisboneframes)
if vmd_use_trans and not (bone.has_translate and bone.has_enabled):
# raise some sort of warning
w = "Warning: supported bone '%s' uses move/shift in VMD, but move/shift not allowed by PMX" % bonestr
core.MY_PRINT_FUNC(w)
core.MY_PRINT_FUNC("")
core.MY_PRINT_FUNC("Done!")
return None
if __name__ == '__main__':
core.MY_PRINT_FUNC(_SCRIPT_VERSION)
core.MY_PRINT_FUNC(helptext)
core.RUN_WITH_TRACEBACK(main)
| 49.992701
| 364
| 0.709812
|
0b396b5066540ef95a3cd6c1716a030e2a7b69ec
| 15,425
|
py
|
Python
|
discovery-provider/src/tasks/user_replica_set.py
|
Tenderize/audius-protocol
|
aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51
|
[
"Apache-2.0"
] | null | null | null |
discovery-provider/src/tasks/user_replica_set.py
|
Tenderize/audius-protocol
|
aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51
|
[
"Apache-2.0"
] | null | null | null |
discovery-provider/src/tasks/user_replica_set.py
|
Tenderize/audius-protocol
|
aa15844e3f12812fe8aaa81e2cb6e5c5fa89ff51
|
[
"Apache-2.0"
] | null | null | null |
import logging
from datetime import datetime
from sqlalchemy.orm.session import make_transient
from src.app import get_contract_addresses, get_eth_abi_values
from src.models import URSMContentNode
from src.tasks.users import invalidate_old_user, lookup_user_record
from src.utils.eth_contracts_helpers import (
content_node_service_type,
sp_factory_registry_key,
)
from src.utils.indexing_errors import IndexingError
from src.utils.redis_cache import get_pickled_key, get_sp_id_key
from src.utils.user_event_constants import (
user_replica_set_manager_event_types_arr,
user_replica_set_manager_event_types_lookup,
)
logger = logging.getLogger(__name__)
def user_replica_set_state_update(
self,
update_task,
session,
user_replica_set_mgr_txs,
block_number,
block_timestamp,
block_hash,
redis,
):
"""Return int representing number of User model state changes found in transaction and set of user_id values"""
num_user_replica_set_changes = 0
user_ids = set()
if not user_replica_set_mgr_txs:
return num_user_replica_set_changes, user_ids
user_replica_set_manager_abi = update_task.abi_values["UserReplicaSetManager"][
"abi"
]
user_contract = update_task.web3.eth.contract(
address=get_contract_addresses()["user_replica_set_manager"],
abi=user_replica_set_manager_abi,
)
# This stores the state of the user object along with all the events applied to it
# before it gets committed to the db
# Data format is {"user_id": {"user", "events": []}}
# NOTE - events are stored only for debugging purposes and not used or persisted anywhere
user_replica_set_events_lookup = {}
# This stores the state of the cnode object along with all events applied
# Data format is {"cnode_sp_id": {"cnode_record", "events":[]}}
cnode_events_lookup = {}
# pylint: disable=too-many-nested-blocks
for tx_receipt in user_replica_set_mgr_txs:
txhash = update_task.web3.toHex(tx_receipt.transactionHash)
for event_type in user_replica_set_manager_event_types_arr:
user_events_tx = getattr(user_contract.events, event_type)().processReceipt(
tx_receipt
)
for entry in user_events_tx:
try:
args = entry["args"]
# Check if _userId is present
# If user id is found in the event args, update the local lookup object
user_id = args._userId if "_userId" in args else None
if user_id:
user_ids.add(user_id)
# Check if cnodeId is present
# If cnode id is found in event args, update local lookup object
cnode_sp_id = args._cnodeSpId if "_cnodeSpId" in args else None
# if the user id is not in the lookup object, it hasn't been initialized yet
# first, get the user object from the db(if exists or create a new one)
# then set the lookup object for user_id with the appropriate props
if user_id and (user_id not in user_replica_set_events_lookup):
ret_user = lookup_user_record(
update_task,
session,
entry,
block_number,
block_timestamp,
txhash,
)
user_replica_set_events_lookup[user_id] = {
"user": ret_user,
"events": [],
}
if cnode_sp_id and (cnode_sp_id not in cnode_events_lookup):
ret_cnode = lookup_ursm_cnode(
update_task,
session,
entry,
block_number,
block_timestamp,
txhash,
)
cnode_events_lookup[cnode_sp_id] = {
"content_node": ret_cnode,
"events": [],
}
# Add or update the value of the user record for this block in user_replica_set_events_lookup,
# ensuring that multiple events for a single user result in only 1 row insert operation
# (even if multiple operations are present)
if (
event_type
== user_replica_set_manager_event_types_lookup[
"update_replica_set"
]
):
primary = args._primaryId
secondaries = args._secondaryIds
signer = args._signer
user_record = user_replica_set_events_lookup[user_id]["user"]
user_record.updated_at = datetime.utcfromtimestamp(
block_timestamp
)
user_record.primary_id = primary
user_record.secondary_ids = secondaries
user_record.replica_set_update_signer = signer
# Update cnode endpoint string reconstructed from sp ID
creator_node_endpoint_str = get_endpoint_string_from_sp_ids(
update_task, primary, secondaries, redis
)
user_record.creator_node_endpoint = creator_node_endpoint_str
user_replica_set_events_lookup[user_id]["user"] = user_record
user_replica_set_events_lookup[user_id]["events"].append(
event_type
)
# Process L2 Content Node operations
elif (
event_type
== user_replica_set_manager_event_types_lookup[
"add_or_update_content_node"
]
):
cnode_record = parse_ursm_cnode_record(
update_task,
entry,
cnode_events_lookup[cnode_sp_id]["content_node"],
)
if cnode_record is not None:
cnode_events_lookup[cnode_sp_id][
"content_node"
] = cnode_record
cnode_events_lookup[cnode_sp_id]["events"].append(
event_type
)
except Exception as e:
logger.info("Error in parse user replica set transaction")
event_blockhash = update_task.web3.toHex(block_hash)
raise IndexingError(
"user_replica_set",
block_number,
event_blockhash,
txhash,
str(e),
) from e
num_user_replica_set_changes += len(user_events_tx)
# for each record in user_replica_set_events_lookup, invalidate the old record and add the new record
# we do this after all processing has completed so the user record is atomic by block, not tx
for user_id, value_obj in user_replica_set_events_lookup.items():
logger.info(
f"index.py | user_replica_set.py | Replica Set Processing Adding {value_obj['user']}"
)
invalidate_old_user(session, user_id)
session.add(value_obj["user"])
for content_node_id, value_obj in cnode_events_lookup.items():
logger.info(
f"index.py | user_replica_set.py | Content Node Processing Adding {value_obj['content_node']}"
)
invalidate_old_cnode_record(session, content_node_id)
session.add(value_obj["content_node"])
return num_user_replica_set_changes, user_ids
# Reconstruct endpoint string from primary and secondary IDs
# Attempt to retrieve from cached values populated in index_network_peers.py
# If unavailable, then a fallback to ethereum mainnet contracts will occur
# Note that in the case of an invalid spID - one that is not yet registered on
# the ethereum mainnet contracts, there will be an empty value in the returned
# creator_node_endpoint
# If this discrepancy occurs, a client replica set health check sweep will
# result in a client-initiated failover operation to a valid set of replicas
def get_endpoint_string_from_sp_ids(update_task, primary, secondaries, redis):
sp_factory_inst = None
endpoint_string = None
primary_endpoint = None
try:
sp_factory_inst, primary_endpoint = get_endpoint_from_id(
update_task, sp_factory_inst, primary
)
endpoint_string = f"{primary_endpoint}"
for secondary_id in secondaries:
secondary_endpoint = None
sp_factory_inst, secondary_endpoint = get_endpoint_from_id(
update_task, sp_factory_inst, secondary_id
)
# Conditionally log if endpoint is None after fetching
if not secondary_endpoint:
logger.info(
f"index.py | user_replica_set.py | Failed to find secondary info for {secondary_id}"
)
# Append to endpoint string regardless of status
endpoint_string = f"{endpoint_string},{secondary_endpoint}"
except Exception as exc:
logger.error(
f"index.py | user_replica_set.py | ERROR in get_endpoint_string_from_sp_ids {exc}"
)
raise exc
logger.info(
f"index.py | user_replica_set.py | constructed:"
f"{endpoint_string} from {primary},{secondaries}",
exc_info=True,
)
return endpoint_string
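# Illustration (added; not part of the indexer): once the endpoints are
# resolved, creator_node_endpoint is just a comma-separated string of the form
# "primary,secondary1,secondary2".  The helper below only documents that shape
# and does not touch the chain, the sp_id cache, or redis.
def _format_replica_set(primary_endpoint, secondary_endpoints):
    return ",".join([primary_endpoint] + list(secondary_endpoints))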
# Helper function to query endpoint in ursm cnode record parsing
def get_ursm_cnode_endpoint(update_task, sp_id):
endpoint = None
sp_factory_inst = None
try:
sp_factory_inst, endpoint = get_endpoint_from_id(
update_task, sp_factory_inst, sp_id
)
except Exception as exc:
logger.error(
f"index.py | user_replica_set.py | ERROR in get_ursm_cnode_endpoint {exc}",
exc_info=True,
)
raise exc
return endpoint
# Initializes sp_factory if necessary and retrieves spID
# Returns initialized instance of contract and endpoint
def get_endpoint_from_id(update_task, sp_factory_inst, sp_id):
endpoint = None
# Get sp_id cache key
cache_key = get_sp_id_key(sp_id)
# Attempt to fetch from cache
sp_info_cached = get_pickled_key(update_task.redis, cache_key)
if sp_info_cached:
endpoint = sp_info_cached[1]
logger.info(
f"index.py | user_replica_set.py | CACHE HIT FOR {cache_key}, found {sp_info_cached}"
)
return sp_factory_inst, endpoint
if not endpoint:
logger.info(
f"index.py | user_replica_set.py | CACHE MISS FOR {cache_key}, found {sp_info_cached}"
)
if sp_factory_inst is None:
sp_factory_inst = get_sp_factory_inst(update_task)
cn_endpoint_info = sp_factory_inst.functions.getServiceEndpointInfo(
content_node_service_type, sp_id
).call()
logger.info(
f"index.py | user_replica_set.py | spID={sp_id} fetched {cn_endpoint_info}"
)
endpoint = cn_endpoint_info[1]
return sp_factory_inst, endpoint
# Return instance of ServiceProviderFactory initialized with configs
def get_sp_factory_inst(update_task):
shared_config = update_task.shared_config
eth_web3 = update_task.eth_web3
eth_registry_address = eth_web3.toChecksumAddress(
shared_config["eth_contracts"]["registry"]
)
eth_registry_instance = eth_web3.eth.contract(
address=eth_registry_address, abi=get_eth_abi_values()["Registry"]["abi"]
)
sp_factory_address = eth_registry_instance.functions.getContract(
sp_factory_registry_key
).call()
sp_factory_inst = eth_web3.eth.contract(
address=sp_factory_address,
abi=get_eth_abi_values()["ServiceProviderFactory"]["abi"],
)
return sp_factory_inst
# Update cnode_record with event arguments
def parse_ursm_cnode_record(update_task, entry, cnode_record):
event_args = entry["args"]
cnode_record.delegate_owner_wallet = event_args._cnodeDelegateOwnerWallet
cnode_record.owner_wallet = event_args._cnodeOwnerWallet
cnode_record.proposer_1_delegate_owner_wallet = (
event_args._proposer1DelegateOwnerWallet
)
cnode_record.proposer_2_delegate_owner_wallet = (
event_args._proposer2DelegateOwnerWallet
)
cnode_record.proposer_3_delegate_owner_wallet = (
event_args._proposer3DelegateOwnerWallet
)
cnode_record.proposer_sp_ids = event_args._proposerSpIds
# Retrieve endpoint from eth contracts
cnode_sp_id = event_args._cnodeSpId
cnode_record.endpoint = get_ursm_cnode_endpoint(update_task, cnode_sp_id)
return cnode_record
# Return or create instance of record pointing to this content_node
def lookup_ursm_cnode(
update_task, session, entry, block_number, block_timestamp, txhash
):
event_blockhash = update_task.web3.toHex(entry.blockHash)
event_args = entry["args"]
# Arguments from the event
cnode_sp_id = event_args._cnodeSpId
cnode_record_exists = (
session.query(URSMContentNode).filter_by(cnode_sp_id=cnode_sp_id).count() > 0
)
cnode_record = None
if cnode_record_exists:
cnode_record = (
session.query(URSMContentNode)
.filter(
URSMContentNode.cnode_sp_id == cnode_sp_id,
URSMContentNode.is_current == True,
)
.first()
)
# expunge the result from sqlalchemy so we can modify it without UPDATE statements being made
# https://stackoverflow.com/questions/28871406/how-to-clone-a-sqlalchemy-db-object-with-new-primary-key
session.expunge(cnode_record)
make_transient(cnode_record)
else:
cnode_record = URSMContentNode(
is_current=True,
cnode_sp_id=cnode_sp_id,
created_at=datetime.utcfromtimestamp(block_timestamp),
)
# update these fields regardless of type
cnode_record.blockhash = event_blockhash
cnode_record.blocknumber = block_number
cnode_record.txhash = txhash
return cnode_record
def invalidate_old_cnode_record(session, cnode_sp_id):
cnode_record_exists = (
session.query(URSMContentNode).filter_by(cnode_sp_id=cnode_sp_id).count() > 0
)
if cnode_record_exists:
num_invalidated_records = (
session.query(URSMContentNode)
.filter(
URSMContentNode.cnode_sp_id == cnode_sp_id,
URSMContentNode.is_current == True,
)
.update({"is_current": False})
)
assert (
num_invalidated_records > 0
), "Update operation requires a current cnode to be invalidated"
| 41.023936
| 115
| 0.617504
|
0c9374c83152020a2369d34922c89a81a70c3839
| 4,102
|
py
|
Python
|
scripts/main_endtoend.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
scripts/main_endtoend.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | 1
|
2021-08-02T13:02:12.000Z
|
2021-08-02T13:02:12.000Z
|
scripts/main_endtoend.py
|
kumachan-mis/py-pdf-term
|
282505826ce8c626003e753068d15738d772ce46
|
[
"MIT"
] | null | null | null |
import json
import os
from argparse import ArgumentParser
from py_pdf_term import PyPDFTermExtractor
from py_pdf_term.configs import MethodLayerConfig
from scripts.utils import (
generate_domain_pdfs,
get_domains,
pdf_to_techterm_path,
relpath_from_basedir,
)
script_name = os.path.basename(__file__)
if __name__ == "__main__":
parser = ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--mcvalue", help="use MC-Value method", action="store_true")
group.add_argument("--tfidf", help="use TF-IDF method", action="store_true")
group.add_argument("--lfidf", help="use LF-IDF method", action="store_true")
group.add_argument("--flr", help="use FLR method", action="store_true")
group.add_argument("--hits", help="use HITS method", action="store_true")
group.add_argument("--flrh", help="use FLRH method", action="store_true")
group.add_argument("--mdp", help="use MDP method", action="store_true")
args = parser.parse_args()
if args.mcvalue:
method_type = "single"
method_name = "mcvalue"
method = "py_pdf_term.MCValueMethod"
elif args.tfidf:
method_type = "multi"
method_name = "tfidf"
method = "py_pdf_term.TFIDFMethod"
elif args.lfidf:
method_type = "multi"
method_name = "lfidf"
method = "py_pdf_term.LFIDFMethod"
elif args.flr:
method_type = "single"
method_name = "flr"
method = "py_pdf_term.FLRMethod"
elif args.hits:
method_type = "single"
method_name = "hits"
method = "py_pdf_term.HITSMethod"
elif args.flrh:
method_type = "single"
method_name = "flrh"
method = "py_pdf_term.FLRHMethod"
elif args.mdp:
method_type = "multi"
method_name = "mdp"
method = "py_pdf_term.MDPMethod"
else:
raise RuntimeError("unreachable statement")
extractor = PyPDFTermExtractor(
method_config=MethodLayerConfig(
method_type=method_type,
method=method,
data_cache="py_pdf_term.MethodLayerDataNoCache",
)
)
file_name = f"{method_name}.json"
domains = get_domains()
if method_type == "single":
domain_pdfs_list = generate_domain_pdfs(domains)
for domain_pdfs in domain_pdfs_list:
for pdf_path in domain_pdfs.pdf_paths:
techterm_path = pdf_to_techterm_path(pdf_path, method_name)
techterm_short_path = relpath_from_basedir(techterm_path)
print(f"{script_name}: creating {techterm_short_path} ...")
pdf_techterms = extractor.extract(
domain_pdfs.domain, pdf_path, single_domain_pdfs=domain_pdfs
)
techterm_dir_name = os.path.dirname(techterm_path)
os.makedirs(techterm_dir_name, exist_ok=True)
with open(techterm_path, "w") as techterm_file:
dict_obj = pdf_techterms.to_dict()
json.dump(dict_obj, techterm_file, ensure_ascii=False, indent=2)
elif method_type == "multi":
domain_pdfs_list = list(generate_domain_pdfs(domains))
for domain_pdfs in domain_pdfs_list:
for pdf_path in domain_pdfs.pdf_paths:
techterm_path = pdf_to_techterm_path(pdf_path, method_name)
techterm_short_path = relpath_from_basedir(techterm_path)
print(f"{script_name}: creating {techterm_short_path} ...")
pdf_techterms = extractor.extract(
domain_pdfs.domain, pdf_path, multi_domain_pdfs=domain_pdfs_list
)
techterm_dir_name = os.path.dirname(techterm_path)
os.makedirs(techterm_dir_name, exist_ok=True)
with open(techterm_path, "w") as techterm_file:
dict_obj = pdf_techterms.to_dict()
json.dump(dict_obj, techterm_file, ensure_ascii=False, indent=2)
else:
raise RuntimeError("unreachable statement")
| 37.633028
| 84
| 0.643101
|
1ae3cb855bb8f4ce5edd1e6dcbf816f3a20c763a
| 1,000
|
py
|
Python
|
test/test_managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | null | null | null |
test/test_managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | 14
|
2021-11-30T21:56:19.000Z
|
2022-02-07T18:47:10.000Z
|
test/test_managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | 1
|
2022-01-12T14:59:39.000Z
|
2022-01-12T14:59:39.000Z
|
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import mx_platform_python
from mx_platform_python.model.managed_member_create_request import ManagedMemberCreateRequest
class TestManagedMemberCreateRequest(unittest.TestCase):
"""ManagedMemberCreateRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testManagedMemberCreateRequest(self):
"""Test ManagedMemberCreateRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = ManagedMemberCreateRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.777778
| 242
| 0.736
|
2d5b7c94037424714b5bae20694dd2734b6bcddb
| 8,512
|
pyw
|
Python
|
cryptonotes.pyw
|
domreichl/cryptonotes
|
15ce7706d87288a0cf52149d107179a8e3f2eb84
|
[
"Apache-2.0"
] | null | null | null |
cryptonotes.pyw
|
domreichl/cryptonotes
|
15ce7706d87288a0cf52149d107179a8e3f2eb84
|
[
"Apache-2.0"
] | null | null | null |
cryptonotes.pyw
|
domreichl/cryptonotes
|
15ce7706d87288a0cf52149d107179a8e3f2eb84
|
[
"Apache-2.0"
] | null | null | null |
#! python3
# cryptonotes.pyw by Dom Reichl
import string
from random import shuffle
from tkinter import *
from tkinter import filedialog, messagebox
class Cryptonotes(Frame):
''' Builds a simple notepad application with encryption function (substitution cipher). '''
def __init__(self, master=None, file=None):
'''
Initializes master frame with pack geometry.
Creates a field for entering text and a menu bar for file and edit functions.
Assigns character pool for encryption as string to variable 'self.characters'.
'''
Frame.__init__(self, master)
self.pack(expand=YES, fill=BOTH)
self.create_textfield()
self.create_menubar()
self.characters = string.ascii_letters + string.digits + '§$&/=?`.°"_,: ;'
def create_textfield(self):
''' Creates text entry widget with dynamic scrollbar. '''
scrollbar = Scrollbar(self)
textarea = Text(self)
scrollbar.config(command=textarea.yview) # scrollbar interaction
textarea.config(yscrollcommand=scrollbar.set) # dynamic scrollbar length
scrollbar.pack(side=RIGHT, fill=Y)
textarea.pack(side=LEFT, expand=YES, fill=BOTH)
self.text = textarea
self.text.focus() # saves user one click
def create_menubar(self):
''' Creates menu widget for file and edit functions. '''
# create menu widget
menubar = Menu(self)
self.master.config(menu=menubar)
# create cascade for file functions
file = Menu(menubar, tearoff=False)
menubar.add_cascade(label="File", menu=file)
file.add_command(label='New', command=self.new)
# create subcascade for opening files
openfile = Menu(file, tearoff=False)
file.add_cascade(label='Open ', menu=openfile)
openfile.add_command(label='Normal', command=self.normal_open)
openfile.add_command(label='Decrypt', command=self.decrypt_open)
# create subcascade for saving files
savefile = Menu(file, tearoff=False)
file.add_cascade(label='Save ', menu=savefile)
savefile.add_command(label='Normal', command=self.normal_save)
savefile.add_command(label='Encrypt', command=self.encrypt_save)
file.add_command(label='Close', command=root.destroy)
# create cascade for edit functions
edit = Menu(menubar, tearoff=False)
menubar.add_cascade(label='Edit', menu=edit)
edit.add_command(label='Cut', command=self.cut)
edit.add_command(label='Copy', command=self.copy)
edit.add_command(label='Paste', command=self.paste)
def new(self):
''' Empties text area. '''
self.text.delete('1.0', END)
def normal_open(self):
''' Opens text file in normal mode. '''
loadedfile = filedialog.askopenfile(filetypes=[('Text File', '.txt')]) # dialog box for selecting file
if loadedfile == None: # make sure user has not canceled file selection
return
else:
normal_text = loadedfile.read()
if normal_text.startswith("***encrypted file***"): # check whether file is normal or encrypted
                normal_text = normal_text[len("***encrypted file***"):]  # drop the tag prefix (str.strip removes characters, not a literal prefix)
messagebox.showwarning("Dom's Cryptonotes", "This file is encrypted.")
self.new() # empty text area
self.text.insert('1.0', normal_text)
def normal_save(self):
''' Saves text as file in normal mode. '''
filename = filedialog.asksaveasfilename(filetypes=[('Text File', '.txt')])
if filename == "": # make sure user has not canceled the dialog box
return
else:
with open(filename + '.txt', 'w') as fn:
fn.write(self.text.get('1.0', END+'-1c'))
messagebox.showinfo("Dom's Cryptonotes", "File saved.")
def decrypt_open(self):
''' Opens text file in decryption mode. '''
loadedfile = filedialog.askopenfile(filetypes=[('Text File', '.txt')])
if loadedfile == None: # make sure user has not canceled file selection
return
else:
encrypted_text = loadedfile.read()
normal_text = self.decrypt(encrypted_text)
self.new() # empty text area
self.text.insert('1.0', normal_text)
def decrypt(self, encrypted_text):
''' Decrypts text by resubstituting characters. '''
if encrypted_text.startswith("***encrypted file***"): # check whether file is actually encrypted
            encrypted_text = encrypted_text[len("***encrypted file***"):]  # remove encryption tag by slicing (str.strip would remove characters, not the prefix)
else:
messagebox.showwarning("Dom's Cryptonotes", "This file is not encrypted.")
return encrypted_text # returns text to insert into text widget without further processing
key_length = len(self.characters) # get length of encryption key
key = encrypted_text[:5] + encrypted_text[-key_length+5:] # extract key from text
ciphertext = encrypted_text[5:-key_length+5] # extract actual text
decrypted_text = ""
for i in range(len(ciphertext)): # iterate through every character in the text
if ciphertext[i] in self.characters:
for j in range(key_length): # iterate through every character in the key
if ciphertext[i] == key[j]:
decrypted_text = decrypted_text + self.characters[j] # resubstitute character
else: # some special characters don't need decryption
decrypted_text = decrypted_text + ciphertext[i]
return decrypted_text
def encrypt_save(self):
''' Saves text as file in encryption mode. '''
filename = filedialog.asksaveasfilename(filetypes=[('Text File', '.txt')])
if filename == "": # make sure user has not canceled the dialog box
return
else:
with open(filename + '.txt', 'w') as fn:
fn.write(self.encrypt(self.text.get('1.0', END+'-1c'))) # get text, encrypt it, and write it into file
messagebox.showinfo("Dom's Cryptonotes", "File encrypted and saved.")
def encrypt(self, normal_text):
''' Encrypts text with substitution cipher. '''
charlist = [i for i in self.characters] # turns string into list
shuffle(charlist) # randomizes characters in list
ciphertext = ""
for i in normal_text:
if i in self.characters:
ciphertext = ciphertext + charlist[self.characters.index(i)] # substitute character
else: # some special characters aren't substituted
ciphertext = ciphertext + i
key = ''.join(charlist) # turn shuffled character list into string
encrypted_text = "***encrypted file***" + key[:5] + ciphertext + key[5:] # add encryption tag and enclose text within two parts of the key string
return encrypted_text
def cut(self):
''' Allows user to cut selected text. '''
self.copy() # calls function to store text in clipboard
self.text.delete(SEL_FIRST, SEL_LAST)
def copy(self):
''' Allows user to copy selected text to clipboard. '''
if not self.text.tag_ranges(SEL):
messagebox.showerror("Dom's Cryptonotes", "No text selected.")
else:
selected_text = self.text.get(SEL_FIRST, SEL_LAST)
self.clipboard_clear()
self.clipboard_append(selected_text)
def paste(self):
''' Allows user to paste text from clipboard. '''
try:
pastetext = self.clipboard_get()
except TclError:
showerror("Dom's Cryptonotes", "Nothing to paste")
return
if self.text.tag_ranges(SEL):
self.text.delete(SEL_FIRST, SEL_LAST) # delete selected text
self.text.insert(INSERT, pastetext) # insert text
# select pasted text
self.text.tag_remove(SEL, '1.0', END)
self.text.tag_add(SEL, INSERT+'-%dc' % len(pastetext), INSERT)
self.text.see(INSERT)
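# --- Round-trip sketch (added for illustration; never called by the GUI) -------
# encrypt() hides a freshly shuffled key inside the ciphertext (5 characters up
# front, the rest appended at the end) behind the "***encrypted file***" tag,
# and decrypt() undoes exactly that.  The helper assumes an existing
# Cryptonotes instance and checks that the two operations invert each other.
def _cipher_round_trip(notes, text="hello world 123"):
    return notes.decrypt(notes.encrypt(text)) == text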
root = Tk()
app = Cryptonotes(master=root)
app.master.title("Dom's Cryptonotes")
app.mainloop()
| 43.651282
| 154
| 0.607495
|
6b7281885d83984209333f5d1d2227fd9f803e82
| 26,729
|
py
|
Python
|
src/pyscripts/sqlalchemy/orm/dependency.py
|
paddybu/xsbs
|
3810eab4cd91d9a927ac724a4892763a5ff562a5
|
[
"BSD-3-Clause"
] | 2
|
2020-07-02T21:50:43.000Z
|
2021-01-12T09:26:25.000Z
|
src/pyscripts/sqlalchemy/orm/dependency.py
|
greghaynes/xsbs
|
a14bd4c96733a8cf01a8a01200d22868b3335c4f
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyscripts/sqlalchemy/orm/dependency.py
|
greghaynes/xsbs
|
a14bd4c96733a8cf01a8a01200d22868b3335c4f
|
[
"BSD-3-Clause"
] | null | null | null |
# orm/dependency.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
Bridges the ``PropertyLoader`` (i.e. a ``relation()``) and the
``UOWTransaction`` together to allow processing of relation()-based
dependencies at flush time.
"""
from sqlalchemy import sql, util
import sqlalchemy.exceptions as sa_exc
from sqlalchemy.orm import attributes, exc, sync
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
def create_dependency_processor(prop):
types = {
ONETOMANY : OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY : ManyToManyDP,
}
return types[prop.direction](prop)
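# Added note (not part of SQLAlchemy): the factory above is a pure dispatch on
# the relation's direction flag.  The helper below restates that mapping for
# documentation only, resolving the DP classes lazily since they are defined
# further down in this module.
def _processor_class_for(direction):
    return {ONETOMANY: OneToManyDP,
            MANYTOONE: ManyToOneDP,
            MANYTOMANY: ManyToManyDP}[direction]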
class DependencyProcessor(object):
no_dependencies = False
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
self.key = prop.key
self.dependency_marker = MapperStub(self.parent, self.mapper, self.key)
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError("Can't build a DependencyProcessor for relation %s. "
"No target attributes to populate between parent and child are present" % self.prop)
def _get_instrumented_attribute(self):
"""Return the ``InstrumentedAttribute`` handled by this
        ``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this ``DependencyProcessor``.
"""
# TODO: use correct API for this
return self._get_instrumented_attribute().hasparent(state)
def register_dependencies(self, uowcommit):
"""Tell a ``UOWTransaction`` what mappers are dependent on
which, with regards to the two or three mappers handled by
this ``DependencyProcessor``.
"""
raise NotImplementedError()
def register_processors(self, uowcommit):
"""Tell a ``UOWTransaction`` about this object as a processor,
which will be executed after that mapper's objects have been
saved or before they've been deleted. The process operation
manages attributes and dependent operations between two mappers.
"""
raise NotImplementedError()
def whose_dependent_on_who(self, state1, state2):
"""Given an object pair assuming `obj2` is a child of `obj1`,
return a tuple with the dependent object second, or None if
there is no dependency.
"""
if state1 is state2:
return None
elif self.direction == ONETOMANY:
return (state1, state2)
else:
return (state2, state1)
def process_dependencies(self, task, deplist, uowcommit, delete = False):
"""This method is called during a flush operation to
synchronize data between a parent and child object.
It is called within the context of the various mappers and
sometimes individual objects sorted according to their
insert/update/delete order (topological sort).
"""
raise NotImplementedError()
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
"""Used before the flushes' topological sort to traverse
through related objects and ensure every instance which will
require save/update/delete is properly added to the
UOWTransaction.
"""
raise NotImplementedError()
def _verify_canload(self, state):
if state is not None and not self.mapper._canload(state, allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError("Attempting to flush an item of type %s on collection '%s', "
"which is not the expected type %s. Configure mapper '%s' to load this "
"subtype polymorphically, or set enable_typechecks=False to allow subtypes. "
"Mismatched typeloading may cause bi-directional relationships (backrefs) "
"to not function properly." % (state.class_, self.prop, self.mapper.class_, self.mapper))
else:
raise exc.FlushError("Attempting to flush an item of type %s on collection '%s', "
"whose mapper does not inherit from that of %s." % (state.class_, self.prop, self.mapper.class_))
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
"""Called during a flush to synchronize primary key identifier
values between a parent/child object, as well as to an
associationrow in the case of many-to-many.
"""
raise NotImplementedError()
def _check_reverse_action(self, uowcommit, parent, child, action):
"""Determine if an action has been performed by the 'reverse' property of this property.
this is used to ensure that only one side of a bidirectional relation
issues a certain operation for a parent/child pair.
"""
for r in self.prop._reverse_property:
if (r._dependency_processor, action, parent, child) in uowcommit.attributes:
return True
return False
def _performed_action(self, uowcommit, parent, child, action):
"""Establish that an action has been performed for a certain parent/child pair.
Used only for actions that are sensitive to bidirectional double-action,
i.e. manytomany, post_update.
"""
uowcommit.attributes[(self, action, parent, child)] = True
def _conditional_post_update(self, state, uowcommit, related):
"""Execute a post_update call.
For relations that contain the post_update flag, an additional
``UPDATE`` statement may be associated after an ``INSERT`` or
before a ``DELETE`` in order to resolve circular row
dependencies.
This method will check for the post_update flag being set on a
particular relationship, and given a target object and list of
one or more related objects, and execute the ``UPDATE`` if the
given related object list contains ``INSERT``s or ``DELETE``s.
"""
if state is not None and self.post_update:
for x in related:
if x is not None and not self._check_reverse_action(uowcommit, x, state, "postupdate"):
uowcommit.register_object(state, postupdate=True, post_update_cols=[r for l, r in self.prop.synchronize_pairs])
self._performed_action(uowcommit, x, state, "postupdate")
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def register_dependencies(self, uowcommit):
if self.post_update:
uowcommit.register_dependency(self.mapper, self.dependency_marker)
uowcommit.register_dependency(self.parent, self.dependency_marker)
else:
uowcommit.register_dependency(self.parent, self.mapper)
def register_processors(self, uowcommit):
if self.post_update:
uowcommit.register_processor(self.dependency_marker, self, self.parent)
else:
uowcommit.register_processor(self.parent, self, self.parent)
def process_dependencies(self, task, deplist, uowcommit, delete = False):
if delete:
# head object is being deleted, and we manage its list of child objects
# the child objects have to have their foreign key to the parent set to NULL
# this phase can be called safely for any cascade but is unnecessary if delete cascade
# is on.
if self.post_update or not self.passive_deletes == 'all':
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
self._synchronize(state, child, None, True, uowcommit)
self._conditional_post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in history.unchanged:
if child is not None:
self._synchronize(state, child, None, True, uowcommit)
self._conditional_post_update(child, uowcommit, [state])
else:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=True)
if history:
for child in history.added:
self._synchronize(state, child, None, False, uowcommit)
if child is not None:
self._conditional_post_update(child, uowcommit, [state])
for child in history.deleted:
if not self.cascade.delete_orphan and not self.hasparent(child):
self._synchronize(state, child, None, True, uowcommit)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(state, child, None, False, uowcommit)
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
if delete:
# head object is being deleted, and we manage its list of child objects
# the child objects have to have their foreign key to the parent set to NULL
if not self.post_update:
should_null_fks = not self.cascade.delete and not self.passive_deletes == 'all'
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(child)
else:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=True)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(child)
for child in history.deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False)
elif self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(
attributes.instance_state(c),
isdelete=True)
if self._pks_changed(uowcommit, state):
if not history:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_updates)
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(child)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
source = state
dest = child
if dest is None or (not self.post_update and uowcommit.is_deleted(dest)):
return
self._verify_canload(child)
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(source, self.parent, dest, self.mapper, self.prop.synchronize_pairs)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(uowcommit, state, self.parent, self.prop.synchronize_pairs)
class DetectKeySwitch(DependencyProcessor):
"""a special DP that works for many-to-one relations, fires off for
child items who have changed their referenced key."""
no_dependencies = True
def register_dependencies(self, uowcommit):
pass
def register_processors(self, uowcommit):
uowcommit.register_processor(self.parent, self, self.mapper)
def preprocess_dependencies(self, task, deplist, uowcommit, delete=False):
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
if not delete and not self.passive_updates:
self._process_key_switches(deplist, uowcommit)
def process_dependencies(self, task, deplist, uowcommit, delete=False):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
        # the listonly flag in its own preprocess stage (which results in
        # UPDATE statements being emitted)
if not delete and self.passive_updates:
self._process_key_switches(deplist, uowcommit)
def _process_key_switches(self, deplist, uowcommit):
switchers = set(s for s in deplist if self._pks_changed(uowcommit, s))
if switchers:
# yes, we're doing a linear search right now through the UOW. only
# takes effect when primary key values have actually changed.
# a possible optimization might be to enhance the "hasparents" capability of
# attributes to actually store all parent references, but this introduces
# more complicated attribute accounting.
for s in [elem for elem in uowcommit.session.identity_map.all_states()
if issubclass(elem.class_, self.parent.class_) and
self.key in elem.dict and
elem.dict[self.key] is not None and
attributes.instance_state(elem.dict[self.key]) in switchers
]:
uowcommit.register_object(s)
sync.populate(attributes.instance_state(s.dict[self.key]), self.mapper, s, self.parent, self.prop.synchronize_pairs)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(uowcommit, state, self.mapper, self.prop.synchronize_pairs)
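# ManyToOneDP mirrors OneToManyDP: the referenced (target) mapper saves
# before the referencing (parent) mapper so that the parent's foreign key
# columns can be populated from the target's primary key; the post_update
# flag relaxes this ordering by issuing a follow-up UPDATE instead.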
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
def register_dependencies(self, uowcommit):
if self.post_update:
uowcommit.register_dependency(self.mapper, self.dependency_marker)
uowcommit.register_dependency(self.parent, self.dependency_marker)
else:
uowcommit.register_dependency(self.mapper, self.parent)
def register_processors(self, uowcommit):
if self.post_update:
uowcommit.register_processor(self.dependency_marker, self, self.parent)
else:
uowcommit.register_processor(self.mapper, self, self.parent)
def process_dependencies(self, task, deplist, uowcommit, delete=False):
if delete:
if self.post_update and not self.cascade.delete_orphan and not self.passive_deletes == 'all':
# post_update means we have to update our row to not reference the child object
# before we can DELETE the row
for state in deplist:
self._synchronize(state, None, None, True, uowcommit)
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
self._conditional_post_update(state, uowcommit, history.sum())
else:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=True)
if history:
for child in history.added:
self._synchronize(state, child, None, False, uowcommit)
self._conditional_post_update(state, uowcommit, history.sum())
def preprocess_dependencies(self, task, deplist, uowcommit, delete=False):
if self.post_update:
return
if delete:
if self.cascade.delete or self.cascade.delete_orphan:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(
attributes.instance_state(c), isdelete=True)
else:
for state in deplist:
uowcommit.register_object(state)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(
attributes.instance_state(c),
isdelete=True)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
if state is None or (not self.post_update and uowcommit.is_deleted(state)):
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state, self.parent, self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
def register_dependencies(self, uowcommit):
# many-to-many. create a "Stub" mapper to represent the
# "middle table" in the relationship. This stub mapper doesnt save
# or delete any objects, but just marks a dependency on the two
# related mappers. its dependency processor then populates the
# association table.
uowcommit.register_dependency(self.parent, self.dependency_marker)
uowcommit.register_dependency(self.mapper, self.dependency_marker)
def register_processors(self, uowcommit):
uowcommit.register_processor(self.dependency_marker, self, self.parent)
def process_dependencies(self, task, deplist, uowcommit, delete = False):
connection = uowcommit.transaction.connection(self.mapper)
secondary_delete = []
secondary_insert = []
secondary_update = []
if delete:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=self.passive_deletes)
if history:
for child in history.non_added():
if child is None or self._check_reverse_action(uowcommit, child, state, "manytomany"):
continue
associationrow = {}
self._synchronize(state, child, associationrow, False, uowcommit)
secondary_delete.append(associationrow)
self._performed_action(uowcommit, state, child, "manytomany")
else:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key)
if history:
for child in history.added:
if child is None or self._check_reverse_action(uowcommit, child, state, "manytomany"):
continue
associationrow = {}
self._synchronize(state, child, associationrow, False, uowcommit)
self._performed_action(uowcommit, state, child, "manytomany")
secondary_insert.append(associationrow)
for child in history.deleted:
if child is None or self._check_reverse_action(uowcommit, child, state, "manytomany"):
continue
associationrow = {}
self._synchronize(state, child, associationrow, False, uowcommit)
self._performed_action(uowcommit, state, child, "manytomany")
secondary_delete.append(associationrow)
if not self.passive_updates and self._pks_changed(uowcommit, state):
if not history:
history = uowcommit.get_attribute_history(state, self.key, passive=False)
for child in history.unchanged:
associationrow = {}
sync.update(state, self.parent, associationrow, "old_", self.prop.synchronize_pairs)
sync.update(child, self.mapper, associationrow, "old_", self.prop.secondary_synchronize_pairs)
#self.syncrules.update(associationrow, state, child, "old_")
secondary_update.append(associationrow)
if secondary_delete:
statement = self.secondary.delete(sql.and_(*[
c == sql.bindparam(c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow
]))
result = connection.execute(statement, secondary_delete)
if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_delete):
raise exc.ConcurrentModificationError("Deleted rowcount %d does not match number of "
"secondary table rows deleted from table '%s': %d" %
(result.rowcount, self.secondary.description, len(secondary_delete)))
if secondary_update:
statement = self.secondary.update(sql.and_(*[
c == sql.bindparam("old_" + c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_update):
raise exc.ConcurrentModificationError("Updated rowcount %d does not match number of "
"secondary table rows updated from table '%s': %d" %
(result.rowcount, self.secondary.description, len(secondary_update)))
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
def preprocess_dependencies(self, task, deplist, uowcommit, delete = False):
if not delete:
for state in deplist:
history = uowcommit.get_attribute_history(state, self.key, passive=True)
if history:
for child in history.deleted:
if self.cascade.delete_orphan and self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True)
for c, m in self.mapper.cascade_iterator('delete', child):
uowcommit.register_object(
attributes.instance_state(c), isdelete=True)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
if associationrow is None:
return
self._verify_canload(child)
sync.populate_dict(state, self.parent, associationrow, self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow, self.prop.secondary_synchronize_pairs)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(uowcommit, state, self.parent, self.prop.synchronize_pairs)
class MapperStub(object):
"""Represent a many-to-many dependency within a flush
context.
The UOWTransaction corresponds dependencies to mappers.
MapperStub takes the place of the "association table"
    so that a dependency can be corresponded to it.
"""
def __init__(self, parent, mapper, key):
self.mapper = mapper
self.base_mapper = self
self.class_ = mapper.class_
self._inheriting_mappers = []
def polymorphic_iterator(self):
return iter((self,))
def _register_dependencies(self, uowcommit):
pass
    def _register_processors(self, uowcommit):
pass
def _save_obj(self, *args, **kwargs):
pass
def _delete_obj(self, *args, **kwargs):
pass
def primary_mapper(self):
return self
| 47.901434
| 133
| 0.609488
|
0be515dfe3435e32ffb84549a42bb07458ad044e
| 281
|
py
|
Python
|
QTensorAI/setup.py
|
sss441803/QFewShot
|
eec4769f8fedb8dc52a784b662dd2975b219203b
|
[
"MIT"
] | 1
|
2022-03-18T12:24:41.000Z
|
2022-03-18T12:24:41.000Z
|
QTensorAI/setup.py
|
sss441803/QFewShot
|
eec4769f8fedb8dc52a784b662dd2975b219203b
|
[
"MIT"
] | null | null | null |
QTensorAI/setup.py
|
sss441803/QFewShot
|
eec4769f8fedb8dc52a784b662dd2975b219203b
|
[
"MIT"
] | 1
|
2022-02-03T20:08:32.000Z
|
2022-02-03T20:08:32.000Z
|
from setuptools import setup, find_packages
setup(name='qtensor-ai',
version='0.1',
author='Henry Liu',
author_email='mliu6@uchicago.edu',
license='MIT',
packages=find_packages(),
install_requires=[
'numpy',
'tqdm'
])
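# Typical install commands for this package (a sketch; adjust to your
# environment):
#   pip install .       # regular install of qtensor-ai
#   pip install -e .    # editable install for development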
| 21.615385
| 43
| 0.580071
|
67f5d1f33ef805a2c1fbb35bb5b2b7b8040319e2
| 866
|
py
|
Python
|
test/integrationtests/voight_kampff/__init__.py
|
Ark-Smart-Homes/mycroft-core
|
c039aebc273fb87f32d287d11b6b5622ad3d7ed4
|
[
"Apache-2.0"
] | 1
|
2021-08-19T21:31:43.000Z
|
2021-08-19T21:31:43.000Z
|
test/integrationtests/voight_kampff/__init__.py
|
Ark-Smart-Homes/mycroft-core
|
c039aebc273fb87f32d287d11b6b5622ad3d7ed4
|
[
"Apache-2.0"
] | null | null | null |
test/integrationtests/voight_kampff/__init__.py
|
Ark-Smart-Homes/mycroft-core
|
c039aebc273fb87f32d287d11b6b5622ad3d7ed4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Public API into the voight_kampff package."""
from .tools import (
emit_utterance,
format_dialog_match_error,
mycroft_responses,
print_mycroft_responses,
then_wait,
then_wait_fail,
wait_for_audio_service,
wait_for_dialog,
wait_for_dialog_match,
)
| 32.074074
| 74
| 0.752887
|
8c83bb59d00e3a55a7051f36c8e0156af5906e16
| 3,369
|
py
|
Python
|
Alignment/MuonAlignmentAlgorithms/scripts/reportVsReport.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
Alignment/MuonAlignmentAlgorithms/scripts/reportVsReport.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
Alignment/MuonAlignmentAlgorithms/scripts/reportVsReport.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import sys, optparse
copyargs = sys.argv[:]
for i in range(len(copyargs)):
if copyargs[i] == "": copyargs[i] = "\"\""
if copyargs[i].find(" ") != -1: copyargs[i] = "\"%s\"" % copyargs[i]
commandline = " ".join(copyargs)
print commandline
prog = sys.argv[0]
usage = """Usage:
%(prog)s [options] reportX.py reportY.py
Draws a scatterplot of delta corrections from reportX vs. from reportY.
""" % vars()
parser = optparse.OptionParser(usage)
parser.add_option("-o", "--output",
help="plots' file name. If not give, an automatic file name would be given as reportVsReport_label_selection.png",
type="string",
default="",
dest="filename")
parser.add_option("-l", "--label",
help="label for an automatic filename",
type="string",
default="",
dest="label")
parser.add_option("-s", "--selection",
help="is one of the following: ALL, DT, CSC, CSCE1, CSCE2",
type="string",
default="ALL",
dest="selection")
parser.add_option("-x", "--xlabel",
help="prefix to add to plots' X axis",
type="string",
default="None",
dest="xlabel")
parser.add_option("-y", "--ylabel",
help="prefix to add to plots' Y axis",
type="string",
default="None",
dest="ylabel")
parser.add_option("-w", "--which",
help="binary mask for which variables to draw, defauls is 110011",
type="string",
default="110011",
dest="which")
options, args = parser.parse_args(sys.argv[1:])
if len(args)!=2: print usage; sys.exit()
### definitions of selectors:
def DT(dt, wheel, station, sector): return dt == "DT"
def CSC(csc, endcap, station, ring, chamber):
if csc != "CSC": return False
# skip the duplicated ME1/a
if station==1 and ring==4: return False
# skip non-instrumented ME4/2's:
if station==4 and ring==2 and ( (endcap==1 and (chamber<9 or chamber >13)) or endcap==2 ) : return False
return True
def CSCE1(csc, endcap, station, ring, chamber): return CSC(csc, endcap, station, ring, chamber) and endcap==1
def CSCE2(csc, endcap, station, ring, chamber): return CSC(csc, endcap, station, ring, chamber) and endcap==2
### main part
execfile("plotscripts.py")
ROOT.gROOT.SetBatch(1)
selection = options.selection
if selection == 'ALL': selection = None
execfile(args[0])
rx = reports
execfile(args[1])
ry = reports
if options.which.count('1')>4: c1 = ROOT.TCanvas("c1","c1",1000,800)
else: c1 = ROOT.TCanvas("c1","c1",760,800)
print "corrections2D(reportsX=rx, reportsY=ry, selection=%s, pre_title_x='%s', pre_title_y='%s', which='%s' )" % (
selection, options.xlabel, options.ylabel, options.which )
eval( "corrections2D(reportsX=rx, reportsY=ry, selection=%s, pre_title_x='%s', pre_title_y='%s', which='%s', canvas=c1 )" % (
selection, options.xlabel, options.ylabel, options.which) )
c1.Update()
if len(options.filename)>0: filename = options.filename
else: filename = "reportVsReport_"+options.label+"_"+options.selection+".png"
c1.Print(filename)
| 33.356436
| 132
| 0.589196
|
f013b24b7430200b53ea90869f8ce521d77f4bdd
| 3,789
|
py
|
Python
|
rest-service/manager_rest/test/endpoints/test_events_v3.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/test/endpoints/test_events_v3.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | 1
|
2021-03-26T00:32:30.000Z
|
2021-03-26T00:32:30.000Z
|
rest-service/manager_rest/test/endpoints/test_events_v3.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | 1
|
2019-11-24T12:07:18.000Z
|
2019-11-24T12:07:18.000Z
|
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from unittest import TestCase
from nose.plugins.attrib import attr
from manager_rest.rest.resources_v3 import Events as EventsV3
from manager_rest.test import base_test
from manager_rest.test.endpoints.test_events import EventResult
@attr(client_min_version=3, client_max_version=base_test.LATEST_API_VERSION)
class MapEventToDictTestV3(TestCase):
"""Map event v3 information to a dictionary."""
def test_map_event(self):
"""Map event as returned by SQL query to elasticsearch style output."""
sql_event = EventResult(
timestamp='2016-12-09T00:00Z',
reported_timestamp='2017-05-22T00:00Z',
deployment_id='<deployment_id>',
execution_id='<execution_id>',
workflow_id='<workflow_id>',
message='<message>',
message_code=None,
event_type='<event_type>',
operation='<operation>',
node_id='<node_id>',
node_instance_id='<node_instance_id>',
node_name='<node_name>',
logger=None,
level=None,
type='cloudify_event',
)
expected_es_event = {
'deployment_id': '<deployment_id>',
'execution_id': '<execution_id>',
'workflow_id': '<workflow_id>',
'operation': '<operation>',
'node_instance_id': '<node_instance_id>',
'node_name': '<node_name>',
'event_type': '<event_type>',
'timestamp': '2016-12-09T00:00Z',
'reported_timestamp': '2017-05-22T00:00Z',
'message': '<message>',
'type': 'cloudify_event',
}
es_event = EventsV3._map_event_to_dict(None, sql_event)
self.assertDictEqual(es_event, expected_es_event)
def test_map_log(self):
"""Map log as returned by SQL query to elasticsearch style output."""
sql_log = EventResult(
timestamp='2016-12-09T00:00Z',
reported_timestamp='2017-05-22T00:00Z',
deployment_id='<deployment_id>',
execution_id='<execution_id>',
workflow_id='<workflow_id>',
message='<message>',
message_code=None,
event_type=None,
operation='<operation>',
node_id='<node_id>',
node_instance_id='<node_instance_id>',
node_name='<node_name>',
level='<level>',
logger='<logger>',
type='cloudify_log',
)
expected_es_log = {
'deployment_id': '<deployment_id>',
'execution_id': '<execution_id>',
'workflow_id': '<workflow_id>',
'operation': '<operation>',
'node_instance_id': '<node_instance_id>',
'node_name': '<node_name>',
'level': '<level>',
'timestamp': '2016-12-09T00:00Z',
'reported_timestamp': '2017-05-22T00:00Z',
'message': '<message>',
'type': 'cloudify_log',
'logger': '<logger>',
}
es_log = EventsV3._map_event_to_dict(None, sql_log)
self.assertDictEqual(es_log, expected_es_log)
| 37.147059
| 79
| 0.602006
|
a0f1c90fb66ffda8afe48e22468c83e4790a4292
| 1,325
|
py
|
Python
|
main.py
|
smrsassa/pizza
|
2e2f46f011dbad0f6865d503362bcf906a255845
|
[
"MIT"
] | null | null | null |
main.py
|
smrsassa/pizza
|
2e2f46f011dbad0f6865d503362bcf906a255845
|
[
"MIT"
] | null | null | null |
main.py
|
smrsassa/pizza
|
2e2f46f011dbad0f6865d503362bcf906a255845
|
[
"MIT"
] | null | null | null |
"""
Date.......: 17/05/2020
Project....: pizza
File.......: main.py
Author.....: Samuel Mayer Rufino
Description: This file aims to bring together all of the application's functionality
by means of functions
"""
import include as inc
from time import sleep
inc.msg.cabecalho()
sleep(3)
while True:
    # main menu
index_opc = inc.msg.index_menu()
    # Customer service
if index_opc == 1:
tel = inc.msg.atendimento_index()
existe = inc.selSql.procura_cliente(tel)
if existe:
inc.msg.pedido_index(existe)
else:
inc.msg.cadastra_cliente()
    # Orders
elif index_opc == 2:
opc = inc.msg.pedido()
if opc == 1:
inc.selSql.pedidos_aberto()
elif opc == 2:
data = inc.msg.data_pedido()
inc.selSql.ultimos_pedidos(data)
else:
pass
    # Products / Customers
elif index_opc == 3:
opc = inc.msg.produto()
if opc == 1:
inc.msg.inserir_pizza()
elif opc == 2:
inc.msg.editar_pizza()
elif opc == 3:
inc.msg.inativar_pizza()
elif opc == 4:
inc.msg.editar_usuario()
elif opc == 5:
inc.selSql.vendas()
else:
pass
    # Exit
else:
break
| 19.485294
| 86
| 0.541132
|
9c4b24602a4b08368d99cb096d623c1e0de3159f
| 6,728
|
py
|
Python
|
test/util/bitcoin-util-test.py
|
akathedev/AFMC
|
8d03c73c0deae946312baed3131a61cfec328df0
|
[
"MIT"
] | null | null | null |
test/util/bitcoin-util-test.py
|
akathedev/AFMC
|
8d03c73c0deae946312baed3131a61cfec328df0
|
[
"MIT"
] | null | null | null |
test/util/bitcoin-util-test.py
|
akathedev/AFMC
|
8d03c73c0deae946312baed3131a61cfec328df0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for nodepay utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
        # that stderr is empty if no errors are expected. However, nodepay-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
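# Illustrative values: parse_output('{"a": 1}\n', 'json') returns {'a': 1},
# parse_output('00ff\n', 'hex') returns b'\x00\xff', and any other format
# name raises NotImplementedError.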
if __name__ == '__main__':
main()
| 37.171271
| 125
| 0.637188
|
c3c2507b69c9c28a6e78edc2fe54b73b499cd470
| 8,121
|
py
|
Python
|
neurokit2/complexity/fractal_dfa.py
|
nx10/NeuroKit
|
5d37f2c22348552635dbfb8b33a40e2354ad2f02
|
[
"MIT"
] | 1
|
2021-06-10T03:27:15.000Z
|
2021-06-10T03:27:15.000Z
|
neurokit2_mouni/complexity/fractal_dfa.py
|
m00ni19/NeuroKit
|
d3b13a8923e07e74b64bfd9ed2c848855ede4cf5
|
[
"MIT"
] | null | null | null |
neurokit2_mouni/complexity/fractal_dfa.py
|
m00ni19/NeuroKit
|
d3b13a8923e07e74b64bfd9ed2c848855ede4cf5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from ..misc import expspace
def fractal_dfa(signal, windows="default", overlap=True, integrate=True, order=1, multifractal=False, q=2, show=False):
"""(Multifractal) Detrended Fluctuation Analysis (DFA or MFDFA)
Python implementation of Detrended Fluctuation Analysis (DFA) or Multifractal DFA of a signal.
Detrended fluctuation analysis, much like the Hurst exponent, is used to find long-term statistical
dependencies in time series.
This function can be called either via ``fractal_dfa()`` or ``complexity_dfa()``, and its multifractal
variant can be directly accessed via ``fractal_mfdfa()`` or ``complexity_mfdfa()``
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
windows : list
A list containing the lengths of the windows (number of data points in each subseries). Also
referred to as 'lag' or 'scale'. If 'default', will set it to a logarithmic scale (so that each
        window scale has the same weight) with a minimum of 4 and maximum of a tenth of the length
(to have more than 10 windows to calculate the average fluctuation).
overlap : bool
Defaults to True, where the windows will have a 50% overlap
with each other, otherwise non-overlapping windows will be used.
integrate : bool
It is common practice to convert the signal to a random walk (i.e., detrend and integrate,
which corresponds to the signal 'profile'). Note that it leads to the flattening of the signal,
which can lead to the loss of some details (see Ihlen, 2012 for an explanation). Note that for
strongly anticorrelated signals, this transformation should be applied two times (i.e., provide
``np.cumsum(signal - np.mean(signal))`` instead of ``signal``).
order : int
        The order of the polynomial trend; 1 for a linear trend.
    multifractal : bool
        If true, compute Multifractal Detrended Fluctuation Analysis (MFDFA), in which case the argument
        ``q`` is taken into account.
    q : list
        The sequence of fractal exponents when ``multifractal=True``. Must be a sequence between -10
        and 10 (note that zero will be removed, since the code does not converge there). Setting
        q = 2 (default) gives a result close to a standard DFA. For instance, Ihlen (2012) uses
        ``q=[-5, -3, -1, 0, 1, 3, 5]``.
show : bool
Visualise the trend between the window size and the fluctuations.
Returns
----------
dfa : float
The DFA coefficient.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=3, noise=0.05)
>>> dfa1 = nk.fractal_dfa(signal, show=True)
>>> dfa1 #doctest: +SKIP
>>> dfa2 = nk.fractal_mfdfa(signal, q=np.arange(-3, 4), show=True)
>>> dfa2 #doctest: +SKIP
References
-----------
- Ihlen, E. A. F. E. (2012). Introduction to multifractal detrended fluctuation analysis in Matlab.
Frontiers in physiology, 3, 141.
- Hardstone, R., Poil, S. S., Schiavone, G., Jansen, R., Nikulin, V. V., Mansvelder, H. D., &
Linkenkaer-Hansen, K. (2012). Detrended fluctuation analysis: a scale-free view on neuronal
oscillations. Frontiers in physiology, 3, 450.
- `nolds <https://github.com/CSchoel/nolds/>`_
- `Youtube introduction <https://www.youtube.com/watch?v=o0LndP2OlUI>`_
"""
# Sanity checks
n = len(signal)
windows = _fractal_dfa_findwindows(n, windows)
_fractal_dfa_findwindows_warning(windows, n) # Return warning for too short windows
# Preprocessing
if integrate is True:
signal = np.cumsum(signal - np.mean(signal)) # Get signal profile
# Sanitize fractal power
if multifractal is True:
q = _fractal_mfdfa_q(q)
fluctuations = np.zeros(len(windows))
# Start looping over windows
for i, window in enumerate(windows):
# Get window
segments = _fractal_dfa_getwindow(signal, n, window, overlap=overlap)
# Get polynomial trends
        trends = _fractal_dfa_trends(segments, window, order=order)
# Get local fluctuation
fluctuations[i] = _fractal_dfa_fluctuation(segments, trends, multifractal, q)
# Filter zeros
nonzero = np.nonzero(fluctuations)[0]
windows = windows[nonzero]
fluctuations = fluctuations[nonzero]
# Compute trend
if len(fluctuations) == 0:
return np.nan
else:
dfa = np.polyfit(np.log2(windows), np.log2(fluctuations), order)
if show is True:
_fractal_dfa_plot(windows, fluctuations, dfa)
return dfa[0]
# =============================================================================
# Utilities
# =============================================================================
def _fractal_dfa_findwindows(n, windows="default"):
# Convert to array
if isinstance(windows, list):
windows = np.asarray(windows)
# Default windows number
if windows is None or isinstance(windows, str):
windows = int(n / 10)
# Default windows sequence
if isinstance(windows, int):
windows = expspace(
10, int(n / 10), windows, base=2
) # see https://github.com/neuropsychology/NeuroKit/issues/206
windows = np.unique(windows) # keep only unique
return windows
def _fractal_dfa_findwindows_warning(windows, n):
# Check windows
if len(windows) < 2:
raise ValueError("NeuroKit error: fractal_dfa(): more than one window is needed.")
if np.min(windows) < 2:
raise ValueError("NeuroKit error: fractal_dfa(): there must be at least 2 data points" "in each window")
if np.max(windows) >= n:
raise ValueError(
"NeuroKit error: fractal_dfa(): the window cannot contain more data points than the" "time series."
)
def _fractal_dfa_getwindow(signal, n, window, overlap=True):
if overlap:
segments = np.array([signal[i : i + window] for i in np.arange(0, n - window, window // 2)])
else:
segments = signal[: n - (n % window)]
segments = segments.reshape((signal.shape[0] // window, window))
return segments
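# Worked example for the overlapping case: with n=100 and window=20 the
# segment starts are np.arange(0, 80, 10), i.e. 0, 10, ..., 70, giving
# eight half-overlapping windows of 20 samples each.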
def _fractal_dfa_trends(segments, window, order=1):
x = np.arange(window)
coefs = np.polyfit(x[:window], segments.T, order).T
# TODO: Could this be optimized? Something like np.polyval(x[:window], coefs)
trends = np.array([np.polyval(coefs[j], x) for j in np.arange(len(segments))])
return trends
def _fractal_dfa_fluctuation(segments, trends, multifractal=False, q=2):
detrended = segments - trends
if multifractal is True:
var = np.var(detrended, axis=1)
fluctuation = np.float_power(np.mean(np.float_power(var, q / 2), axis=1) / 2, 1 / q.T)
fluctuation = np.mean(fluctuation) # Average over qs (not sure of that!)
else:
# Compute Root Mean Square (RMS)
fluctuation = np.sum(detrended ** 2, axis=1) / detrended.shape[1]
fluctuation = np.sqrt(np.sum(fluctuation) / len(fluctuation))
return fluctuation
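# In the standard (non-multifractal) branch above, the fluctuation is the
# root mean square of the detrended residuals:
#   F(window) = sqrt(mean over segments of mean(residual**2))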
def _fractal_dfa_plot(windows, fluctuations, dfa):
fluctfit = 2 ** np.polyval(dfa, np.log2(windows))
plt.loglog(windows, fluctuations, "bo")
plt.loglog(windows, fluctfit, "r", label=r"$\alpha$ = %0.3f" % dfa[0])
plt.title("DFA")
plt.xlabel(r"$\log_{2}$(Window)")
plt.ylabel(r"$\log_{2}$(Fluctuation)")
plt.legend()
plt.show()
# =============================================================================
# Utils MDDFA
# =============================================================================
def _fractal_mfdfa_q(q=2):
# TODO: Add log calculator for q ≈ 0
# Fractal powers as floats
q = np.asarray_chkfinite(q, dtype=float)
# Ensure q≈0 is removed, since it does not converge. Limit set at |q| < 0.1
q = q[(q < -0.1) + (q > 0.1)]
# Reshape q to perform np.float_power
q = q.reshape(-1, 1)
return q
| 36.581081
| 119
| 0.630464
|
7de1364bc16b57914fb893f6b426a799418901af
| 13,438
|
py
|
Python
|
tests/testing/vcr/integration/test_vcr.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
tests/testing/vcr/integration/test_vcr.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
tests/testing/vcr/integration/test_vcr.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import os
import pytest
from tornado import gen
from functools import partial
from jaeger_client import Tracer, ConstSampler
from jaeger_client.reporter import InMemoryReporter
from tchannel.errors import UnexpectedError, TimeoutError
from tchannel.thrift import client_for
from tchannel.testing import vcr
@pytest.yield_fixture
def tracer():
reporter = InMemoryReporter()
tracer = Tracer(
service_name='test-tracer',
sampler=ConstSampler(True),
reporter=reporter,
)
try:
yield tracer
finally:
tracer.close()
@pytest.fixture(params=['old', 'new', 'new,tracing'])
def api(request):
return request.param
@pytest.fixture
def use_old_api(api):
return api == 'old'
@pytest.fixture
def trace_kwargs(api, tracer):
kwargs = {}
if 'tracing' in api.split(','):
kwargs['trace'] = True
kwargs['tracer'] = tracer
return kwargs
@pytest.fixture
def get_body(use_old_api):
if use_old_api:
return (lambda r: r.get_body())
else:
@gen.coroutine
def new_get_body(r):
return r.body
return new_get_body
@pytest.fixture
def call(mock_server, use_old_api, trace_kwargs):
if use_old_api:
from tchannel.tornado import TChannel
channel = TChannel('test-client')
def old_f(endpoint, body, headers=None, service=None, scheme=None,
ttl=None):
return channel.request(
hostport=mock_server.hostport,
service=service,
arg_scheme=scheme,
).send(endpoint, headers or '', body, ttl=ttl)
return old_f
else:
from tchannel import TChannel
channel = TChannel('test-client', **trace_kwargs)
def new_f(endpoint, body, headers=None, service=None, scheme=None,
ttl=None):
scheme = scheme or 'raw'
return channel.call(
hostport=mock_server.hostport,
scheme=scheme,
service=service,
arg1=endpoint,
arg2=headers or '',
arg3=body,
timeout=ttl,
)
return new_f
@pytest.fixture
def thrift_client(thrift_service, mock_server, use_old_api, trace_kwargs):
if use_old_api:
from tchannel.tornado import TChannel
return client_for('myservice', thrift_service)(
tchannel=TChannel('thrift-client'),
hostport=mock_server.hostport,
)
else:
from tchannel import TChannel
from tchannel.thrift import thrift_request_builder
myservice = thrift_request_builder(
'myservice', thrift_service, hostport=mock_server.hostport
)
return mk_fake_client(
TChannel('thrift-client', **trace_kwargs),
myservice
)
def mk_fake_client(channel, builder):
class Client(object):
@gen.coroutine
def _call(self, name, *args, **kwargs):
req = getattr(builder, name)(*args, **kwargs)
res = yield channel.thrift(req)
raise gen.Return(res.body)
def __getattr__(self, name):
return partial(self._call, name)
return Client()
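# mk_fake_client returns a thin dynamic proxy: an attribute access such as
# client.getItem('foo') goes through __getattr__ into _call, which builds
# the Thrift request via the request builder and sends it over the channel,
# returning the response body.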
@pytest.mark.gen_test
def test_record_success(tmpdir, mock_server, call, get_body):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_write('world').once()
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service')
assert 'world' == (yield get_body(response))
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service')
assert 'world' == (yield get_body(response))
assert cass.play_count == 1
@pytest.mark.gen_test
def test_record_success_thrift(
tmpdir, mock_server, thrift_service, thrift_client
):
path = tmpdir.join('data.yaml')
expected_item = thrift_service.Item(
'foo', thrift_service.Value(stringValue='bar')
)
mock_server.expect_call(thrift_service, method='getItem').and_result(
expected_item
).once()
with vcr.use_cassette(str(path)) as cass:
item = yield thrift_client.getItem('foo')
assert item == expected_item
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path)) as cass:
item = yield thrift_client.getItem('foo')
assert item == expected_item
assert cass.play_count == 1
@pytest.mark.gen_test
def test_protocol_exception(tmpdir, mock_server, call):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_raise(
Exception('great sadness')
).once()
with pytest.raises(UnexpectedError):
with vcr.use_cassette(str(path)):
yield call('hello', 'world', service='hello_service')
assert not path.check() # nothing should've been recorded
@pytest.mark.gen_test
def test_record_thrift_exception(
tmpdir, mock_server, thrift_service, thrift_client
):
path = tmpdir.join('data.yaml')
mock_server.expect_call(thrift_service, method='getItem').and_raise(
thrift_service.ItemDoesNotExist('foo')
).once()
with vcr.use_cassette(str(path)) as cass:
with pytest.raises(thrift_service.ItemDoesNotExist):
yield thrift_client.getItem('foo')
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path)) as cass:
with pytest.raises(thrift_service.ItemDoesNotExist):
yield thrift_client.getItem('foo')
assert cass.play_count == 1
@pytest.mark.gen_test
def test_use_cassette_as_decorator(tmpdir, mock_server, call, get_body):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_write('world').once()
@gen.coroutine
@vcr.use_cassette(str(path))
def f():
response = yield call('hello', 'world', service='hello_service')
body = yield get_body(response)
raise gen.Return(body)
body = yield f()
assert body == 'world'
body = yield f()
assert body == 'world'
@pytest.mark.gen_test
def test_use_cassette_as_decorator_with_inject(tmpdir, mock_server, call):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_raise(Exception('great sadness'))
@gen.coroutine
@vcr.use_cassette(str(path), inject=True)
def f(cassette):
with pytest.raises(UnexpectedError):
yield call('hello', 'world', service='hello_service')
assert len(cassette.data) == 0
assert cassette.play_count == 0
yield f()
yield f()
@pytest.mark.gen_test
def test_use_cassette_with_matchers(tmpdir, mock_server, call, get_body):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_write('world').once()
with vcr.use_cassette(str(path), matchers=['body']) as cass:
response = yield call('hello', 'world', service='hello_service')
assert 'world' == (yield get_body(response))
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path), matchers=['body']) as cass:
response = yield call(
'not-hello', 'world', service='not_hello_service'
)
assert 'world' == (yield get_body(response))
assert cass.play_count == 1
@pytest.mark.gen_test
def test_record_into_nonexistent_directory(tmpdir, mock_server, call,
get_body):
path = tmpdir.join('somedir/data.yaml')
mock_server.expect_call('hello').and_write('world').once()
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service')
assert 'world' == (yield get_body(response))
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service')
assert 'world' == (yield get_body(response))
assert cass.play_count == 1
@pytest.mark.gen_test
def test_record_success_with_ttl(tmpdir, mock_server, call, get_body):
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_write('world', delay=0.1).once()
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service',
ttl=0.2)
assert 'world' == (yield get_body(response))
assert cass.play_count == 0
assert path.check(file=True)
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service',
ttl=0.05) # shouldn't time out
assert 'world' == (yield get_body(response))
assert cass.play_count == 1
@pytest.mark.gen_test
def test_record_success_with_ttl_timeout(tmpdir, mock_server, call, get_body):
"""Make sure legitimate request timeouts propagate during recording."""
path = tmpdir.join('data.yaml')
mock_server.expect_call('hello').and_write('world', delay=0.1).once()
with pytest.raises(TimeoutError):
with vcr.use_cassette(str(path)) as cass:
response = yield call('hello', 'world', service='hello_service',
ttl=0.05)
assert 'world' == (yield get_body(response))
assert cass.play_count == 0
@pytest.mark.gen_test
@pytest.mark.parametrize('tracing_before, tracing_after', [
(True, True),
(True, False),
(False, True),
(False, False),
], ids=['trace-trace', 'trace-notrace', 'notrace-trace', 'notrace-notrace'])
def test_vcr_with_tracing(
tmpdir, mock_server, tracer, tracing_before, tracing_after
):
from tchannel import TChannel
mock_server.expect_call('hello', 'json').and_write('world').once()
path = tmpdir.join('data.yaml')
if tracing_before:
ch = TChannel('client', trace=True, tracer=tracer)
else:
ch = TChannel('client')
with vcr.use_cassette(str(path)) as cass:
response = yield ch.json(
hostport=mock_server.hostport,
service='hello_service',
endpoint='hello',
body='world',
)
assert 'world' == response.body
assert cass.play_count == 0
assert path.check(file=True)
if tracing_after:
ch = TChannel('client', trace=True, tracer=tracer)
else:
ch = TChannel('client')
with vcr.use_cassette(str(path), record_mode=vcr.RecordMode.NONE) as cass:
response = yield ch.json(
hostport=mock_server.hostport,
service='hello_service',
endpoint='hello',
body='world',
)
assert 'world' == response.body
assert cass.play_count == 1
@pytest.mark.gen_test
def test_old_recording_with_tracing(mock_server, tracer):
from tchannel import TChannel
# an existing recording that contains tracing information
path = os.path.join(
os.path.dirname(__file__), 'data', 'old_with_tracing.yaml'
)
ch = TChannel('client', trace=True, tracer=tracer)
mock_server.expect_call('hello', 'json').and_write('world').once()
with vcr.use_cassette(path, record_mode=vcr.RecordMode.NONE):
response = yield ch.json(
hostport=mock_server.hostport,
service='hello_service',
endpoint='hello',
body='world',
)
assert 'world' == response.body
@pytest.mark.gen_test
def test_old_recording_without_tracing(mock_server, tracer):
from tchannel import TChannel
# an existing recording that does not contain tracing information
path = os.path.join(
os.path.dirname(__file__), 'data', 'old_without_tracing.yaml'
)
ch = TChannel('client', trace=True, tracer=tracer)
mock_server.expect_call('hello', 'json').and_write('world').once()
with vcr.use_cassette(path, record_mode=vcr.RecordMode.NONE):
response = yield ch.json(
hostport=mock_server.hostport,
service='hello_service',
endpoint='hello',
body='world',
)
assert 'world' == response.body
| 30.130045
| 79
| 0.653594
|
ce82bc33d8c775f54790e1b787cd45d3504be6c0
| 26,532
|
py
|
Python
|
test/docker-test/oms_docker_tests.py
|
antomatody/OMS-Agent-for-Linux
|
3d034cc337d91871bb4caaba04758103166ffb28
|
[
"Apache-2.0"
] | 252
|
2015-12-11T18:38:42.000Z
|
2019-04-22T20:57:45.000Z
|
test/docker-test/oms_docker_tests.py
|
antomatody/OMS-Agent-for-Linux
|
3d034cc337d91871bb4caaba04758103166ffb28
|
[
"Apache-2.0"
] | 627
|
2016-01-09T01:17:40.000Z
|
2019-05-06T13:48:25.000Z
|
test/docker-test/oms_docker_tests.py
|
antomatody/OMS-Agent-for-Linux
|
3d034cc337d91871bb4caaba04758103166ffb28
|
[
"Apache-2.0"
] | 203
|
2019-05-20T02:51:32.000Z
|
2022-03-28T09:07:41.000Z
|
"""
Test the OMS Agent on all or a subset of images.
Setup: read parameters and set up the HTML report
Test:
1. Create container and install agent
2. Wait for data to propagate to backend and check for data
?. Repeat steps 1 and 2 with newer agent
4. De-onboard and re-onboard agent
5. Remove agent
6. Reinstall agent
?. Optionally, wait for hours and check data and agent status
7. Purge agent and delete container
Finish: compile HTML report and log file
"""
import argparse
import atexit
import enum
import json
import os
import subprocess
import re
import shutil
import sys
from collections import OrderedDict
from datetime import datetime, timedelta
from glob import glob
from time import sleep
from json2html import *
from verify_e2e import check_e2e
E2E_DELAY = 10 # Delay (minutes) before checking for data
SUCCESS_TEMPLATE = "<td><span style='background-color: #66ff99'>{0}</span></td>"
FAILURE_TEMPLATE = "<td><span style='background-color: red; color: white'>{0}</span></td>"
class WorkspaceStatus(enum.Enum):
ONBOARDED = 1
NOT_ONBOARDED = 2
ERROR = 3
class Color:
BOLD = '\033[1m'
RED = '\033[91m'
ENDC = '\033[0m'
images = ["ubuntu14", "ubuntu16", "ubuntu18", "ubuntu20py3", "debian8", "debian9", "debian10", "centos6", "centos7", "centos8py3", "oracle6", "oracle7", "redhat6", "redhat7", "redhat8py3"]
# images = ["ubuntu14", "ubuntu16", "ubuntu18", "ubuntu20", "debian8", "debian9", "debian10", "centos6", "centos7", "centos8", "oracle6", "oracle7", "redhat6", "redhat7", "redhat8"]
python3_images = ["ubuntu20py3", "redhat8py3", "centos8py3"]
hostnames = []
install_times = {}
procs = {}
example_text = """examples:
$ python -u oms_docker_tests.py\t\t\tall images
$ python -u oms_docker_tests.py -i -p\t\t\tall images, in parallel, with instant upgrade
$ python -u oms_docker_tests.py -p -l 120\t\tall images, in parallel, long mode with length specified
$ python -u oms_docker_tests.py -d image1 image2 ...\tsubset of images
"""
parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-p', '--parallel', action='store_true', help='test distros in parallel')
parser.add_argument('-i', '--instantupgrade', action='store_true', help='test upgrade on top of old bundle')
parser.add_argument('-l', '--long', nargs='?', type=int, const=250, default=0, help='add a long wait in minutes followed by a second verification (default: 250)')
parser.add_argument('-d', '--distros', nargs='*', default=images, help='list of distros to test (default: all)')
args = parser.parse_args()
images = [i for i in args.distros if i in images]
invalid = [i for i in args.distros if i not in images]
if invalid:
print('invalid distro(s): {0}. continuing ...'.format(invalid))
with open('{0}/parameters.json'.format(os.getcwd()), 'r') as f:
parameters = f.read()
if re.search(r'"<.*>"', parameters):
print('Please replace placeholders in parameters.json')
exit()
parameters = json.loads(parameters)
try:
if parameters['oms bundle'] and os.path.isfile('omsfiles/'+parameters['oms bundle']):
oms_bundle = parameters['oms bundle']
if parameters['old oms bundle'] and os.path.isfile('omsfiles/'+parameters['old oms bundle']):
old_oms_bundle = parameters['old oms bundle']
except KeyError:
print('parameters not defined correctly or omsbundle file not found')
workspace_id = parameters['workspace id']
workspace_key = parameters['workspace key']
def append_file(src, dest):
"""Append contents of src to dest."""
f = open(src, 'r')
dest.write(f.read())
f.close()
def copy_append_remove(container, image, src, dest):
"""Copy file from docker container, append it to the specified destination, and delete it"""
os.system("docker cp {0}:/home/temp/{1} results/{2}/".format(container, src, image))
append_file('results/{0}/{1}'.format(image, src), dest)
os.remove('results/{0}/{1}'.format(image, src))
def write_log_command(cmd, log):
"""Print cmd to stdout and append it to log file."""
print(Color.BOLD + cmd + Color.ENDC)
log.write(cmd + '\n')
log.write('-' * 40)
log.write('\n')
def get_time_diff(timevalue1, timevalue2):
"""Get time difference in minutes and seconds"""
timediff = timevalue2 - timevalue1
minutes, seconds = divmod(timediff.days * 86400 + timediff.seconds, 60)
return minutes, seconds
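# For example, a 2 minute 5 second gap between the two datetimes gives
# divmod(125, 60) == (2, 5).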
def setup_vars(image):
"""Set up variables and open necessary log files for a generalized test operation."""
container = image + '-container'
log_path = 'results/{0}/result.log'.format(image)
html_path = 'results/{0}/result.html'.format(image)
omslog_path = 'results/{0}/omsagent.log'.format(image)
tmp_path = 'results/{0}/temp.log'.format(image)
log_file = open(log_path, 'a+')
html_file = open(html_path, 'a+')
oms_file = open(omslog_path, 'a+')
return container, log_path, html_path, omslog_path, tmp_path, log_file, html_file, oms_file
def close_files(*args):
for f in args:
f.close()
def get_versioned_python(image):
if image in python3_images:
return "python3"
else:
return "python2"
def check_workspace_status(container):
"""Check the onboarding status of the agent using omsadmin.sh."""
try:
out = subprocess.check_output('docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -l'.format(container), shell=True)
except subprocess.CalledProcessError as e:
return WorkspaceStatus.ERROR
if re.search('[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', out).group(0) == workspace_id:
return WorkspaceStatus.ONBOARDED
elif out.rstrip() == "No Workspace":
return WorkspaceStatus.NOT_ONBOARDED
else:
return WorkspaceStatus.ERROR
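# The GUID regex above extracts the workspace id from the `omsadmin.sh -l`
# output; onboarding counts as successful only when it matches the
# workspace id configured in parameters.json.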
# TODO this should be elsewhere/def'd
for image in images:
path = 'results/{0}'.format(image)
if not os.path.isdir('results/{0}'.format(image)):
os.mkdir(path)
subfolder = '{}/'.format(images[0]) if len(images) == 1 or args.parallel else ''
result_html_file = open('results/{0}finalresult.html'.format(subfolder), 'a+')
result_log_file = open('results/{0}finalresult.log'.format(subfolder), 'a+')
htmlstart = """<!DOCTYPE html>
<html>
<head>
<style>
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>
</head>
<body>
"""
result_html_file.write(htmlstart)
def main():
"""Orchestrate fundemental testing steps onlined in header docstring."""
if args.parallel:
        print('Running tests in parallel. Progress will be hidden. A final report will be generated for each distro individually')
for image in images:
flags = ' '.join([a for a in sys.argv[1:] if a not in images and a not in ['-p', '--parallel', '-d', '--distros']])
cmd = 'python -u {0} {1} -d {2}'.format(sys.argv[0], flags, image).split()
print(cmd)
with open(os.devnull, 'wb') as devnull:
procs[image] = subprocess.Popen(cmd, stdout=devnull, stderr=devnull, env={'SUBPROCESS': 'true'})
done = False
elapsed_time = 0
while not done:
status = []
status_msg = '\rStatus after {0} minutes ['.format(elapsed_time)
for p in procs.items():
status.append(p[1].poll())
                status_code = 'running' if status[-1] is None else (Color.RED + str(status[-1]) + Color.ENDC if status[-1] else 'done')
status_msg += ' {0}: {1},'.format(p[0], status_code)
sys.stdout.write(status_msg[:-1] + ' ]')
sys.stdout.flush()
done = True if None not in status else False
sleep(60)
elapsed_time += 1
print('\nFinished!')
else:
if args.instantupgrade:
if not old_oms_bundle:
print('Instant upgrade specified but no old oms bundle provided. Check parameters.json and omsfiles directory for bundle file existence')
sys.exit(0)
install_msg = install_agent(old_oms_bundle)
verify_msg = verify_data()
instantupgrade_install_msg = upgrade_agent(oms_bundle)
instantupgrade_verify_msg = verify_data()
deonboard_reonboard_msg = deonboard_reonboard()
else:
install_msg = install_agent(oms_bundle)
verify_msg = verify_data()
deonboard_reonboard_msg = deonboard_reonboard()
instantupgrade_install_msg, instantupgrade_verify_msg = None, None
remove_msg = remove_agent()
reinstall_msg = reinstall_agent()
if args.long:
for i in reversed(range(1, args.long + 1)):
sys.stdout.write('\rLong-term delay: T-{0} minutes...'.format(i))
sys.stdout.flush()
sleep(60)
print('')
install_times.clear()
for image in images:
install_times.update({image: datetime.now()})
container = image + '-container'
inject_logs(container, image)
long_verify_msg = verify_data()
long_status_msg = check_status()
else:
long_verify_msg, long_status_msg = None, None
purge_delete_agent()
messages = (install_msg, verify_msg, instantupgrade_install_msg, instantupgrade_verify_msg, deonboard_reonboard_msg, remove_msg, reinstall_msg, long_verify_msg, long_status_msg)
create_report(messages)
def install_agent(oms_bundle):
"""Run container and install the OMS agent, returning HTML results."""
message = ""
version = re.search(r'omsagent-\s*([\d.\d-]+)', oms_bundle).group(1)[:-1]
install_times.clear()
for image in images:
container, _, _, _, tmp_path, log_file, html_file, oms_file = setup_vars(image)
write_log_command("\n[{0}] Install OMS Agent {1} ...".format(image, version), log_file)
html_file.write("<h1 id='{0}'> Container: {0} <h1>".format(image))
os.system("docker container stop {0} 2> /dev/null".format(container))
os.system("docker container rm {0} 2> /dev/null".format(container))
uid = os.popen("docker run --name {0} -it --privileged=true -d {1}".format(container, image)).read()[:12]
hostname = image + '-' + uid # uid is the truncated container uid
hostnames.append(hostname)
os.system("docker cp omsfiles/ {0}:/home/temp/".format(container))
os.system("docker exec {0} hostname {1}".format(container, hostname))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -preinstall".format(container, get_versioned_python(image)))
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --purge | tee -a {2}".format(container, oms_bundle, tmp_path))
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --upgrade -w {2} -s {3} | tee -a {4}".format(container, oms_bundle, workspace_id, workspace_key, tmp_path))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -postinstall".format(container, get_versioned_python(image)))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -status".format(container, get_versioned_python(image)))
install_times.update({image: datetime.now()})
write_log_command("\n[{0}] Inject Logs ...".format(image), log_file)
inject_logs(container, image)
append_file(tmp_path, log_file)
os.remove(tmp_path)
copy_append_remove(container, image, 'omsresults.out', log_file)
html_file.write("<h2> Install OMS Agent {0} </h2>".format(version))
copy_append_remove(container, image, 'omsresults.html', html_file)
close_files(log_file, html_file, oms_file)
status = check_workspace_status(container)
if status == WorkspaceStatus.ONBOARDED:
message += SUCCESS_TEMPLATE.format("Install Success")
elif status == WorkspaceStatus.NOT_ONBOARDED:
message += FAILURE_TEMPLATE.format("Onboarding Failed")
else:
message += FAILURE_TEMPLATE.format("Install Failed")
return message
def upgrade_agent(oms_bundle):
message = ""
version = re.search(r'omsagent-\s*([\d.\d-]+)', oms_bundle).group(1)[:-1]
install_times.clear()
for image in images:
container, _, _, _, tmp_path, log_file, html_file, oms_file = setup_vars(image)
write_log_command("\n[{0}] Upgrade OMS Agent {1} ...".format(image, version), log_file)
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --upgrade -w {2} -s {3} | tee -a {4}".format(container, oms_bundle, workspace_id, workspace_key, tmp_path))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -postinstall".format(container, get_versioned_python(image)))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -status".format(container, get_versioned_python(image)))
install_times.update({image: datetime.now()})
inject_logs(container, image)
append_file(tmp_path, log_file)
os.remove(tmp_path)
copy_append_remove(container, image, 'omsresults.out', log_file)
html_file.write("<h2> Upgrade OMS Agent {0} </h2>".format(version))
copy_append_remove(container, image, 'omsresults.html', html_file)
close_files(log_file, html_file, oms_file)
status = check_workspace_status(container)
if status == WorkspaceStatus.ONBOARDED:
message += SUCCESS_TEMPLATE.format("Install Success")
elif status == WorkspaceStatus.NOT_ONBOARDED:
message += FAILURE_TEMPLATE.format("Onboarding Failed")
else:
message += FAILURE_TEMPLATE.format("Install Failed")
return message
def inject_logs(container, image):
"""Inject logs."""
# os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -injectlogs".format(container, get_versioned_python(image)))
sleep(60)
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -injectlogs".format(container, get_versioned_python(image)))
def verify_data():
"""Verify data end-to-end, returning HTML results."""
message = ""
for hostname in hostnames:
image = hostname.split('-')[0]
_, _, _, _, _, log_file, html_file, _ = setup_vars(image)
write_log_command('\n[{0}] Verify E2E Data Results'.format(image), log_file)
while datetime.now() < (install_times[image] + timedelta(minutes=E2E_DELAY)):
mins, secs = get_time_diff(datetime.now(), install_times[image] + timedelta(minutes=E2E_DELAY))
sys.stdout.write('\rE2E propagation delay for {0}: {1} minutes {2} seconds ...'.format(image, mins, secs))
sys.stdout.flush()
sleep(1)
print('')
minutes, _ = get_time_diff(install_times[image], datetime.now())
timespan = 'PT{0}M'.format(minutes)
data = check_e2e(hostname, timespan)
# write detailed table for image
html_file.write("<h2> Verify Data from OMS workspace </h2>")
results = data[image][0]
log_file.write(image + ':\n' + json.dumps(results, indent=4, separators=(',', ': ')) + '\n')
# prepend distro column to results row before generating the table
        data = [OrderedDict([('Distro', image)] + list(results.items()))]
out = json2html.convert(data)
html_file.write(out)
close_files(log_file, html_file)
# write to summary table
from verify_e2e import success_count
if success_count == 6:
message += SUCCESS_TEMPLATE.format("Verify Success")
elif 0 < success_count < 6:
from verify_e2e import success_sources, failed_sources
message += """<td><span style='background-color: #66ff99'>{0} Success</span> <br><br><span style='background-color: red; color: white'>{1} Failed</span></td>""".format(', '.join(success_sources), ', '.join(failed_sources))
elif success_count == 0:
message += FAILURE_TEMPLATE.format("Verify Failed")
return message
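# Hedged illustration of how the query window above is built: minutes since the
# install timestamp are converted into an ISO-8601 duration for check_e2e(). The
# timestamps below are invented and this helper is not called by the test flow.
def _timespan_example():
    installed_at = datetime.now() - timedelta(minutes=12)
    minutes, _ = get_time_diff(installed_at, datetime.now())
    return 'PT{0}M'.format(minutes)  # e.g. 'PT12M'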
def deonboard_reonboard():
"""De-onboard, then re-onboard the agent."""
message = ""
for image in images:
container, _, _, _, tmp_path, log_file, html_file, _ = setup_vars(image)
write_log_command('\n[{0}] De-onboard and Re-onboard OMS Agent ...'.format(image), log_file)
html_file.write("<h2> De-onboard and Re-onboard OMS Agent </h2>")
# set -o pipefail is needed to get the exit code in case the docker exec command fails; otherwise os.system returns the exit code of tee
try:
subprocess.check_output("set -o pipefail && docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -X | tee -a {1}".format(container, tmp_path), shell=True, executable='/bin/bash')
try:
subprocess.check_output("set -o pipefail && docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -w {1} -s {2} | tee -a {3}".format(container, workspace_id, workspace_key, tmp_path), shell=True, executable='/bin/bash')
message += SUCCESS_TEMPLATE.format("De-onboarding and Re-onboarding Success")
except subprocess.CalledProcessError as e:
message += FAILURE_TEMPLATE.format("De-onboarding Success; Re-onboarding Failure")
except subprocess.CalledProcessError as e:
message += FAILURE_TEMPLATE.format("De-onboarding Failure")
append_file(tmp_path, log_file)
os.remove(tmp_path)
close_files(log_file, html_file)
return message
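# Hedged illustration of the `set -o pipefail` point above (not called by the test
# flow): without pipefail the pipeline's exit code is tee's, so a failing command on
# the left-hand side is silently swallowed. `false` stands in for a failing
# `docker exec`.
def _pipefail_demo():
    plain = subprocess.call("false | tee /dev/null", shell=True, executable='/bin/bash')
    strict = subprocess.call("set -o pipefail && false | tee /dev/null", shell=True, executable='/bin/bash')
    return plain, strict  # expected (0, 1): only the pipefail variant surfaces the failure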
def remove_agent():
"""Remove the OMS agent, returning HTML results."""
message = ""
for image in images:
container, _, _, _, tmp_path, log_file, html_file, oms_file = setup_vars(image)
write_log_command('\n[{0}] Remove OMS Agent ...'.format(image), log_file)
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -copyomslogs".format(container, get_versioned_python(image)))
copy_append_remove(container, image, 'copyofomsagent.log', oms_file)
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --remove | tee -a {2}".format(container, oms_bundle, tmp_path))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -status".format(container, get_versioned_python(image)))
append_file(tmp_path, log_file)
os.remove(tmp_path)
copy_append_remove(container, image, 'omsresults.out', log_file)
html_file.write("<h2> Remove OMS Agent </h2>")
copy_append_remove(container, image, 'omsresults.html', html_file)
close_files(log_file, html_file, oms_file)
status = check_workspace_status(container)
if status == WorkspaceStatus.ONBOARDED:
message += FAILURE_TEMPLATE.format("Remove Failed")
elif status == WorkspaceStatus.NOT_ONBOARDED:
message += FAILURE_TEMPLATE.format("Onboarding Failed")
else:
message += SUCCESS_TEMPLATE.format("Remove Success")
return message
def reinstall_agent():
"""Reinstall the OMS agent, returning HTML results."""
message = ""
for image in images:
container, _, _, _, tmp_path, log_file, html_file, oms_file = setup_vars(image)
write_log_command("\n[{0}] Reinstall OMS Agent ...".format(image), log_file)
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --upgrade | tee -a {2}".format(container, oms_bundle, tmp_path))
os.system("docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -w {1} -s {2} | tee -a {3}".format(container, workspace_id, workspace_key, tmp_path))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -postinstall".format(container, get_versioned_python(image)))
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -status".format(container, get_versioned_python(image)))
append_file(tmp_path, log_file)
os.remove(tmp_path)
copy_append_remove(container, image, 'omsresults.out', log_file)
html_file.write("<h2> Reinstall OMS Agent </h2>")
copy_append_remove(container, image, 'omsresults.html', html_file)
close_files(log_file, html_file, oms_file)
status = check_workspace_status(container)
if status == WorkspaceStatus.ONBOARDED:
message += SUCCESS_TEMPLATE.format("Reinstall Success")
elif status == WorkspaceStatus.NOT_ONBOARDED:
message += FAILURE_TEMPLATE.format("Onboarding Failed")
else:
message += FAILURE_TEMPLATE.format("Reinstall Failed")
return message
def check_status():
"""Check agent status."""
message = ""
for image in images:
container, _, _, _, _, log_file, html_file, oms_file = setup_vars(image)
write_log_command("\n[{0}] Check Status ...".format(image), log_file)
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -status".format(container, get_versioned_python(image)))
copy_append_remove(container, image, 'omsresults.out', log_file)
html_file.write("<h2> Check OMS Agent Status </h2>")
copy_append_remove(container, image, 'omsresults.html', html_file)
close_files(log_file, html_file, oms_file)
if os.system('docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -l'.format(container)) == 0:
out = str(subprocess.check_output('docker exec {0} /opt/microsoft/omsagent/bin/omsadmin.sh -l'.format(container), shell=True))
if 'Onboarded' in out:
message += SUCCESS_TEMPLATE.format("Agent Running")
elif 'Warning' in out:
message += FAILURE_TEMPLATE.format("Agent Registered, Not Running")
elif 'Saved' in out:
message += FAILURE_TEMPLATE.format("Agent Not Running, Not Registered")
elif 'Failure' in out:
message += FAILURE_TEMPLATE.format("Agent Not Running, Not Onboarded")
else:
message += FAILURE_TEMPLATE.format("Agent Not Installed")
return message
def purge_delete_agent():
"""Purge the OMS agent and delete container."""
for image in images:
container, _, _, omslog_path, tmp_path, log_file, html_file, oms_file = setup_vars(image)
write_log_command('\n[{0}] Purge OMS Agent ...'.format(image), oms_file)
os.system("docker exec {0} {1} -u /home/temp/omsfiles/oms_run_script.py -copyomslogs".format(container, get_versioned_python(image)))
copy_append_remove(container, image, 'copyofomsagent.log', oms_file)
os.system("docker exec {0} sh /home/temp/omsfiles/{1} --purge | tee -a {2}".format(container, oms_bundle, tmp_path))
append_file(tmp_path, log_file)
os.remove(tmp_path)
append_file(omslog_path, log_file)
close_files(log_file, html_file, oms_file)
os.system("docker container stop {0}".format(container))
os.system("docker container rm {0}".format(container))
def create_report(messages):
"""Compile the final HTML report."""
install_msg, verify_msg, instantupgrade_install_msg, instantupgrade_verify_msg, deonboard_reonboard_msg, remove_msg, reinstall_msg, long_verify_msg, long_status_msg = messages
# summary table
imagesth = ""
resultsth = ""
for image in images:
imagesth += """
<th>{0}</th>""".format(image)
resultsth += """
<th><a href='#{0}'>{0} results</a></th>""".format(image)
# pre-compile instant-upgrade summary
if instantupgrade_install_msg and instantupgrade_verify_msg:
instantupgrade_summary = """
<tr>
<td>Instant Upgrade Install Status</td>
{0}
</tr>
<tr>
<td>Instant Upgrade Verify Data</td>
{1}
</tr>
""".format(instantupgrade_install_msg, instantupgrade_verify_msg)
else:
instantupgrade_summary = ""
# pre-compile long-running summary
if long_verify_msg and long_status_msg:
long_running_summary = """
<tr>
<td>Long-Term Verify Data</td>
{0}
</tr>
<tr>
<td>Long-Term Status</td>
{1}
</tr>
""".format(long_verify_msg, long_status_msg)
else:
long_running_summary = ""
statustable = """
<table>
    <caption><h2>Test Result Table</h2></caption>
<tr>
<th>Distro</th>
{0}
</tr>
<tr>
<td>Install OMSAgent</td>
{1}
</tr>
<tr>
<td>Verify Data</td>
{2}
</tr>
{3}
<tr>
<td>Deonboard and Reonboard OMSAgent</td>
{4}
</tr>
<tr>
<td>Remove OMSAgent</td>
{5}
</tr>
<tr>
<td>Reinstall OMSAgent</td>
{6}
</tr>
{7}
<tr>
<td>Result Link</td>
{8}
    </tr>
</table>
""".format(imagesth, install_msg, verify_msg, instantupgrade_summary, deonboard_reonboard_msg, remove_msg, reinstall_msg, long_running_summary, resultsth)
result_html_file.write(statustable)
# Create final html & log file
for image in images:
append_file('results/{}/result.log'.format(image), result_log_file)
append_file('results/{}/result.html'.format(image), result_html_file)
result_log_file.close()
htmlend = """
</body>
</html>
"""
result_html_file.write(htmlend)
result_html_file.close()
def archive_results():
archive_path = 'results/' + datetime.now().strftime('%Y-%m-%d %H.%M.%S')
os.mkdir(archive_path)
    for f in [f for f in glob('results/*') if f.split('/')[1] in images or f.startswith('results/finalresult')]:
        shutil.move(f, archive_path)
def cleanup():
sys.stdout.write('Initiating cleanup\n')
sys.stdout.flush()
archive_results()
for p in procs.items():
if p[1].poll() is None:
p[1].kill()
for image in images:
container = image + '-container'
os.system('docker kill {} 2> /dev/null'.format(container))
os.system('docker rm --force {} 2> /dev/null'.format(container))
sleep(1)
if __name__ == '__main__':
if not os.environ.get('SUBPROCESS'):
atexit.register(cleanup)
main()
| 44.893401
| 237
| 0.648613
|
f8058d140f7efdea4c6af5ae400f41a9b51361af
| 307
|
py
|
Python
|
outils/unix_pre-commit.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
outils/unix_pre-commit.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
outils/unix_pre-commit.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
#!/usr/local/bin/python3.4
"""Hook de pre-commit Git pour Unix.
Pour utiliser, placez-le dans le dossier .git/hooks en le renommant
en 'pre-commit' sans extension.
"""
import os
import sys
# Run the unit tests
os.chdir("src")
code = os.system(sys.executable + " runtest.py")
sys.exit(code)
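# Hedged helper, not part of the original hook: one possible way to install this
# script into a clone. The source path matches this file's location in the
# repository; the function is illustrative only and would be run from a separate
# script (this module exits above before reaching it).
def install_hook(repo_root):
    import shutil
    import stat
    src = os.path.join(repo_root, "outils", "unix_pre-commit.py")
    dst = os.path.join(repo_root, ".git", "hooks", "pre-commit")
    shutil.copyfile(src, dst)
    os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)  # hooks must be executable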
| 18.058824
| 67
| 0.71987
|
be055762a531dae1eeffd3373834edb6b8591540
| 4,246
|
py
|
Python
|
test/integration/014_hook_tests/test_model_hooks_bq.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | 1
|
2019-10-18T01:16:33.000Z
|
2019-10-18T01:16:33.000Z
|
test/integration/014_hook_tests/test_model_hooks_bq.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | null | null | null |
test/integration/014_hook_tests/test_model_hooks_bq.py
|
tjengel/dbt
|
f985902a002fba36f6f709c6aacf9ae20778e58c
|
[
"Apache-2.0"
] | null | null | null |
from test.integration.base import DBTIntegrationTest, use_profile
MODEL_PRE_HOOK = """
insert into {{this.schema}}.on_model_hook (
state,
target_name,
target_schema,
target_type,
target_threads,
run_started_at,
invocation_id
) VALUES (
'start',
'{{ target.name }}',
'{{ target.schema }}',
'{{ target.type }}',
{{ target.threads }},
'{{ run_started_at }}',
'{{ invocation_id }}'
)
"""
MODEL_POST_HOOK = """
insert into {{this.schema}}.on_model_hook (
state,
target_name,
target_schema,
target_type,
target_threads,
run_started_at,
invocation_id
) VALUES (
'end',
'{{ target.name }}',
'{{ target.schema }}',
'{{ target.type }}',
{{ target.threads }},
'{{ run_started_at }}',
'{{ invocation_id }}'
)
"""
class TestBigqueryPrePostModelHooks(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.run_sql_file("seed_model_bigquery.sql")
self.fields = [
'state',
'target_name',
'target_schema',
'target_threads',
'target_type',
'run_started_at',
'invocation_id'
]
@property
def schema(self):
return "model_hooks_014"
@property
def profile_config(self):
profile = self.bigquery_profile()
profile['test']['outputs']['default2']['threads'] = 3
return profile
@property
def project_config(self):
return {
'macro-paths': ['macros'],
'models': {
'test': {
'pre-hook': [MODEL_PRE_HOOK],
'post-hook':[MODEL_POST_HOOK]
}
}
}
@property
def models(self):
return "models"
def get_ctx_vars(self, state):
field_list = ", ".join(self.fields)
query = "select {field_list} from `{schema}.on_model_hook` where state = '{state}'".format(field_list=field_list, schema=self.unique_schema(), state=state)
vals = self.run_sql(query, fetch='all')
self.assertFalse(len(vals) == 0, 'nothing inserted into hooks table')
self.assertFalse(len(vals) > 1, 'too many rows in hooks table')
ctx = dict(zip(self.fields, vals[0]))
return ctx
def check_hooks(self, state):
ctx = self.get_ctx_vars(state)
self.assertEqual(ctx['state'], state)
self.assertEqual(ctx['target_name'], 'default2')
self.assertEqual(ctx['target_schema'], self.unique_schema())
self.assertEqual(ctx['target_threads'], 3)
self.assertEqual(ctx['target_type'], 'bigquery')
self.assertTrue(ctx['run_started_at'] is not None and len(ctx['run_started_at']) > 0, 'run_started_at was not set')
self.assertTrue(ctx['invocation_id'] is not None and len(ctx['invocation_id']) > 0, 'invocation_id was not set')
@use_profile('bigquery')
def test_pre_and_post_model_hooks_bigquery(self):
self.run_dbt(['run'])
self.check_hooks('start')
self.check_hooks('end')
class TestBigqueryPrePostModelHooksOnSeeds(DBTIntegrationTest):
@property
def schema(self):
return "model_hooks_014"
@property
def models(self):
return "seed-models-bq"
@property
def project_config(self):
return {
'data-paths': ['data'],
'models': {},
'seeds': {
'post-hook': [
'insert into {{ this }} (a, b, c) VALUES (10, 11, 12)',
]
}
}
@use_profile('bigquery')
def test_hooks_on_seeds_bigquery(self):
res = self.run_dbt(['seed'])
self.assertEqual(len(res), 1, 'Expected exactly one item')
res = self.run_dbt(['test'])
self.assertEqual(len(res), 1, 'Expected exactly one item')
result = self.run_sql(
'select a, b, c from `{schema}`.`example_seed` where a = 10',
fetch='all'
)
self.assertFalse(len(result) == 0, 'nothing inserted into table by hook')
self.assertFalse(len(result) > 1, 'too many rows in table')
| 28.496644
| 163
| 0.565473
|
b512651a9b2a6e2811e715ff8dcf7fcf6e5113fd
| 11,454
|
py
|
Python
|
tests/test_retokenize.py
|
YianZhang/jiant-v1-legacy-online-code
|
b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629
|
[
"MIT"
] | 74
|
2020-06-11T11:37:57.000Z
|
2022-03-07T09:44:05.000Z
|
tests/test_retokenize.py
|
YianZhang/jiant-v1-legacy-online-code
|
b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629
|
[
"MIT"
] | 3
|
2020-10-08T18:09:58.000Z
|
2021-07-22T22:24:02.000Z
|
tests/test_retokenize.py
|
YianZhang/jiant-v1-legacy-online-code
|
b6b1066de7cdbe1b95ca1ae3de6989d07b2e9629
|
[
"MIT"
] | 13
|
2020-06-18T11:53:19.000Z
|
2022-03-23T17:15:44.000Z
|
import unittest
import jiant.utils.retokenize as retokenize
class TestRetokenize(unittest.TestCase):
def setUp(self):
self.text = [
"Members of the House clapped their hands",
"I look at Sarah's dog. It was cute.!",
"Mr. Immelt chose to focus on the incomprehensibility of accounting rules.",
"What?",
]
self.token_index_src = [
[0, 1, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[0],
]
self.span_index_src = [
[(0, 4), (5, 7)],
[(0, 1), (3, 5)],
[(0, 2), (6, 11), (6, 8), (7, 11)],
[(0, 1)],
]
def test_moses(self):
self.tokens = [
["Members", "of", "the", "House", "clapped", "their", "hands"],
["I", "look", "at", "Sarah", "'s", "dog", ".", "It", "was", "cute", ".", "!"],
[
"Mr.",
"Immelt",
"chose",
"to",
"focus",
"on",
"the",
"incomprehensibility",
"of",
"accounting",
"rules",
".",
],
["What", "?"],
]
self.token_index_tgt = [
[[0], [1], [2], [3], [4], [5], [6]],
[[0], [1], [2], [3, 4], [5, 6], [7], [8], [9, 10, 11]],
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10, 11]],
[[0, 1]],
]
self.span_index_tgt = [
[(0, 4), (5, 7)],
[(0, 1), (3, 7)],
[(0, 2), (6, 12), (6, 8), (7, 12)],
[(0, 2)],
]
aligner_fn = retokenize.get_aligner_fn("transfo-xl-wt103")
token_aligners, tokens = zip(*(aligner_fn(sent) for sent in self.text))
token_aligners, tokens = list(token_aligners), list(tokens)
token_index_tgt = [
[token_aligner.project_tokens(idxs).tolist() for idxs in token_idxs]
for token_aligner, token_idxs in zip(token_aligners, self.token_index_src)
]
span_index_tgt = [
[token_aligner.project_span(start, end) for (start, end) in span_idxs]
for token_aligner, span_idxs in zip(token_aligners, self.span_index_src)
]
assert self.tokens == tokens
assert self.token_index_tgt == token_index_tgt
assert self.span_index_tgt == span_index_tgt
def test_wpm(self):
self.tokens = [
["Members", "of", "the", "House", "clapped", "their", "hands"],
["I", "look", "at", "Sarah", "'", "s", "dog", ".", "It", "was", "cute", ".", "!"],
[
"Mr",
".",
"I",
"##mme",
"##lt",
"chose",
"to",
"focus",
"on",
"the",
"in",
"##com",
"##p",
"##re",
"##hen",
"##si",
"##bility",
"of",
"accounting",
"rules",
".",
],
["What", "?"],
]
self.token_index_tgt = [
[[0], [1], [2], [3], [4], [5], [6]],
[[0], [1], [2], [3, 4, 5], [6, 7], [8], [9], [10, 11, 12]],
[
[0, 1],
[2, 3, 4],
[5],
[6],
[7],
[8],
[9],
[10, 11, 12, 13, 14, 15, 16],
[17],
[18],
[19, 20],
],
[[0, 1]],
]
self.span_index_tgt = [
[(0, 4), (5, 7)],
[(0, 1), (3, 8)],
[(0, 5), (9, 21), (9, 17), (10, 21)],
[(0, 2)],
]
aligner_fn = retokenize.get_aligner_fn("bert-base-cased")
token_aligners, tokens = zip(*(aligner_fn(sent) for sent in self.text))
token_aligners, tokens = list(token_aligners), list(tokens)
token_index_tgt = [
[token_aligner.project_tokens(idxs).tolist() for idxs in token_idxs]
for token_aligner, token_idxs in zip(token_aligners, self.token_index_src)
]
span_index_tgt = [
[token_aligner.project_span(start, end) for (start, end) in span_idxs]
for token_aligner, span_idxs in zip(token_aligners, self.span_index_src)
]
assert self.tokens == tokens
assert self.token_index_tgt == token_index_tgt
assert self.span_index_tgt == span_index_tgt
def test_bpe(self):
self.tokens = [
[
"members</w>",
"of</w>",
"the</w>",
"house</w>",
"clapped</w>",
"their</w>",
"hands</w>",
],
[
"i</w>",
"look</w>",
"at</w>",
"sarah</w>",
"'s</w>",
"dog</w>",
".</w>",
"it</w>",
"was</w>",
"cute</w>",
".</w>",
"!</w>",
],
[
"mr.</w>",
"im",
"melt</w>",
"chose</w>",
"to</w>",
"focus</w>",
"on</w>",
"the</w>",
"in",
"comprehen",
"si",
"bility</w>",
"of</w>",
"accounting</w>",
"rules</w>",
".</w>",
],
["what</w>", "?</w>"],
]
self.token_index_tgt = [
[[0], [1], [2], [3], [4], [5], [6]],
[[0], [1], [2], [3, 4], [5, 6], [7], [8], [9, 10, 11]],
[[0], [1, 2], [3], [4], [5], [6], [7], [8, 9, 10, 11], [12], [13], [14, 15]],
[[0, 1]],
]
self.span_index_tgt = [
[(0, 4), (5, 7)],
[(0, 1), (3, 7)],
[(0, 3), (7, 16), (7, 12), (8, 16)],
[(0, 2)],
]
aligner_fn = retokenize.get_aligner_fn("openai-gpt")
token_aligners, tokens = zip(*(aligner_fn(sent) for sent in self.text))
token_aligners, tokens = list(token_aligners), list(tokens)
token_index_tgt = [
[token_aligner.project_tokens(idxs).tolist() for idxs in token_idxs]
for token_aligner, token_idxs in zip(token_aligners, self.token_index_src)
]
span_index_tgt = [
[token_aligner.project_span(start, end) for (start, end) in span_idxs]
for token_aligner, span_idxs in zip(token_aligners, self.span_index_src)
]
assert self.tokens == tokens
assert self.token_index_tgt == token_index_tgt
assert self.span_index_tgt == span_index_tgt
def test_sentencepiece(self):
self.tokens = [
["▁Members", "▁of", "▁the", "▁House", "▁clapped", "▁their", "▁hands"],
[
"▁I",
"▁look",
"▁at",
"▁Sarah",
"'",
"s",
"▁dog",
".",
"▁It",
"▁was",
"▁cute",
".",
"!",
],
[
"▁Mr",
".",
"▁I",
"m",
"mel",
"t",
"▁chose",
"▁to",
"▁focus",
"▁on",
"▁the",
"▁in",
"comp",
"re",
"hen",
"s",
"ibility",
"▁of",
"▁accounting",
"▁rules",
".",
],
["▁What", "?"],
]
self.token_index_tgt = [
[[0], [1], [2], [3], [4], [5], [6]],
[[0], [1], [2], [3, 4, 5], [6, 7], [8], [9], [10, 11, 12]],
[
[0, 1],
[2, 3, 4, 5],
[6],
[7],
[8],
[9],
[10],
[11, 12, 13, 14, 15, 16],
[17],
[18],
[19, 20],
],
[[0, 1]],
]
self.span_index_tgt = [
[(0, 4), (5, 7)],
[(0, 1), (3, 8)],
[(0, 6), (10, 21), (10, 17), (11, 21)],
[(0, 2)],
]
aligner_fn = retokenize.get_aligner_fn("xlnet-base-cased")
token_aligners, tokens = zip(*(aligner_fn(sent) for sent in self.text))
token_aligners, tokens = list(token_aligners), list(tokens)
token_index_tgt = [
[token_aligner.project_tokens(idxs).tolist() for idxs in token_idxs]
for token_aligner, token_idxs in zip(token_aligners, self.token_index_src)
]
span_index_tgt = [
[token_aligner.project_span(start, end) for (start, end) in span_idxs]
for token_aligner, span_idxs in zip(token_aligners, self.span_index_src)
]
assert self.tokens == tokens
assert self.token_index_tgt == token_index_tgt
assert self.span_index_tgt == span_index_tgt
def test_bytebpe(self):
self.tokens = [
["Members", "Ġof", "Ġthe", "ĠHouse", "Ġcl", "apped", "Ġtheir", "Ġhands"],
["I", "Ġlook", "Ġat", "ĠSarah", "'s", "Ġdog", ".", "ĠIt", "Ġwas", "Ġcute", ".", "!"],
[
"Mr",
".",
"ĠImm",
"elt",
"Ġchose",
"Ġto",
"Ġfocus",
"Ġon",
"Ġthe",
"Ġincomp",
"rehens",
"ibility",
"Ġof",
"Ġaccounting",
"Ġrules",
".",
],
["What", "?"],
]
self.token_index_tgt = [
[[0], [1], [2], [3], [4, 5], [6], [7]],
[[0], [1], [2], [3, 4], [5, 6], [7], [8], [9, 10, 11]],
[[0, 1], [2, 3], [4], [5], [6], [7], [8], [9, 10, 11], [12], [13], [14, 15]],
[[0, 1]],
]
self.span_index_tgt = [
[(0, 4), (6, 8)],
[(0, 1), (3, 7)],
[(0, 4), (8, 16), (8, 12), (9, 16)],
[(0, 2)],
]
aligner_fn = retokenize.get_aligner_fn("roberta-base")
token_aligners, tokens = zip(*(aligner_fn(sent) for sent in self.text))
token_aligners, tokens = list(token_aligners), list(tokens)
token_index_tgt = [
[token_aligner.project_tokens(idxs).tolist() for idxs in token_idxs]
for token_aligner, token_idxs in zip(token_aligners, self.token_index_src)
]
span_index_tgt = [
[token_aligner.project_span(start, end) for (start, end) in span_idxs]
for token_aligner, span_idxs in zip(token_aligners, self.span_index_src)
]
assert self.tokens == tokens
assert self.token_index_tgt == token_index_tgt
assert self.span_index_tgt == span_index_tgt
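# Hedged usage sketch mirroring the pattern exercised by the tests above: align a
# raw sentence to a wordpiece tokenization and project source-token indices and
# spans onto it. Only calls already used in this file are assumed; running it needs
# the corresponding tokenizer vocabulary to be available locally.
def _alignment_example():
    aligner_fn = retokenize.get_aligner_fn("bert-base-cased")
    token_aligner, target_tokens = aligner_fn("Members of the House clapped their hands")
    house_pieces = token_aligner.project_tokens([3]).tolist()  # source token "House"
    subject_span = token_aligner.project_span(0, 4)            # source span "Members ... House"
    return target_tokens, house_pieces, subject_span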
| 32.725714
| 97
| 0.370438
|
c7922c563c878a7b1856333335541f37d10996ca
| 357
|
py
|
Python
|
file_server/FileServer.py
|
CUrW-SL/cfcwm-cms
|
3888e724800395c478f1b63dab9f77d0afa3b2c4
|
[
"Apache-2.0"
] | null | null | null |
file_server/FileServer.py
|
CUrW-SL/cfcwm-cms
|
3888e724800395c478f1b63dab9f77d0afa3b2c4
|
[
"Apache-2.0"
] | null | null | null |
file_server/FileServer.py
|
CUrW-SL/cfcwm-cms
|
3888e724800395c478f1b63dab9f77d0afa3b2c4
|
[
"Apache-2.0"
] | null | null | null |
import http.server
import socketserver
import os
PORT = 8080
web_dir = os.path.join(os.path.dirname(__file__), 'data')
os.chdir(web_dir)
Handler = http.server.SimpleHTTPRequestHandler
# httpd = socketserver.TCPServer(("104.198.0.87", PORT), Handler)
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
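# Hedged companion snippet intended for a *client* process (this module blocks in
# serve_forever() above and never reaches this point): fetch a file served from the
# 'data' directory. The file name 'example.txt' is an assumption for illustration.
def fetch_example(name='example.txt'):
    from urllib.request import urlopen
    with urlopen('http://localhost:8080/' + name) as response:  # 8080 matches PORT above
        return response.read()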
| 25.5
| 65
| 0.756303
|
b44e8700dbc9101704d4ecc84484d312ea165aaa
| 11,214
|
py
|
Python
|
deploy/python/tracker/jde_tracker.py
|
qili93/PaddleDetection
|
47fc188b65ef8950ac7eb9ab383a2ac7c1cb5c39
|
[
"Apache-2.0"
] | 7,782
|
2019-10-25T09:39:37.000Z
|
2022-03-31T13:44:14.000Z
|
deploy/python/tracker/jde_tracker.py
|
qili93/PaddleDetection
|
47fc188b65ef8950ac7eb9ab383a2ac7c1cb5c39
|
[
"Apache-2.0"
] | 3,499
|
2019-10-29T12:37:40.000Z
|
2022-03-31T14:51:56.000Z
|
deploy/python/tracker/jde_tracker.py
|
qili93/PaddleDetection
|
47fc188b65ef8950ac7eb9ab383a2ac7c1cb5c39
|
[
"Apache-2.0"
] | 1,874
|
2019-10-28T04:21:58.000Z
|
2022-03-31T05:41:21.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is borrow from https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py
"""
import numpy as np
from ppdet.modeling.mot.matching import jde_matching as matching
from ppdet.modeling.mot.motion import KalmanFilter
from ppdet.modeling.mot.tracker.base_jde_tracker import TrackState, BaseTrack, STrack
from ppdet.modeling.mot.tracker.base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks
__all__ = ['JDETracker']
class JDETracker(object):
"""
JDE tracker
Args:
det_thresh (float): threshold of detection score
track_buffer (int): buffer for tracker
min_box_area (int): min box area to filter out low quality boxes
tracked_thresh (float): linear assignment threshold of tracked
stracks and detections
r_tracked_thresh (float): linear assignment threshold of
tracked stracks and unmatched detections
unconfirmed_thresh (float): linear assignment threshold of
unconfirmed stracks and unmatched detections
motion (object): KalmanFilter instance
conf_thres (float): confidence threshold for tracking
metric_type (str): either "euclidean" or "cosine", the distance metric
used for measurement to track association.
"""
def __init__(self,
det_thresh=0.3,
track_buffer=30,
min_box_area=200,
tracked_thresh=0.7,
r_tracked_thresh=0.5,
unconfirmed_thresh=0.7,
motion='KalmanFilter',
conf_thres=0,
metric_type='euclidean'):
self.det_thresh = det_thresh
self.track_buffer = track_buffer
self.min_box_area = min_box_area
self.tracked_thresh = tracked_thresh
self.r_tracked_thresh = r_tracked_thresh
self.unconfirmed_thresh = unconfirmed_thresh
self.motion = KalmanFilter()
self.conf_thres = conf_thres
self.metric_type = metric_type
self.frame_id = 0
self.tracked_stracks = []
self.lost_stracks = []
self.removed_stracks = []
self.max_time_lost = 0
# max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)
def update(self, pred_dets, pred_embs):
"""
        Processes the image frame and finds bounding boxes (detections).
Associates the detection with corresponding tracklets and also handles
lost, removed, refound and active tracklets.
Args:
pred_dets (Tensor): Detection results of the image, shape is [N, 5].
pred_embs (Tensor): Embedding results of the image, shape is [N, 512].
Return:
output_stracks (list): The list contains information regarding the
                online_tracklets for the received image tensor.
"""
self.frame_id += 1
activated_starcks = []
# for storing active tracks, for the current frame
refind_stracks = []
# Lost Tracks whose detections are obtained in the current frame
lost_stracks = []
# The tracks which are not obtained in the current frame but are not
        # removed. (Lost for less time than the removal threshold.)
removed_stracks = []
remain_inds = np.nonzero(pred_dets[:, 4] > self.conf_thres)
if len(remain_inds) == 0:
pred_dets = np.zeros([0, 1])
pred_embs = np.zeros([0, 1])
else:
pred_dets = pred_dets[remain_inds]
pred_embs = pred_embs[remain_inds]
# Filter out the image with box_num = 0. pred_dets = [[0.0, 0.0, 0.0 ,0.0]]
        empty_pred = len(pred_dets) == 1 and np.sum(pred_dets) == 0.0
""" Step 1: Network forward, get detections & embeddings"""
if len(pred_dets) > 0 and not empty_pred:
detections = [
STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30)
for (tlbrs, f) in zip(pred_dets, pred_embs)
]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
# previous tracks which are not active in the current frame are added in unconfirmed list
unconfirmed.append(track)
else:
# Active tracks are added to the local list 'tracked_stracks'
tracked_stracks.append(track)
""" Step 2: First association, with embedding"""
# Combining currently tracked_stracks and lost_stracks
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
STrack.multi_predict(strack_pool, self.motion)
dists = matching.embedding_distance(
strack_pool, detections, metric=self.metric_type)
dists = matching.fuse_motion(self.motion, dists, strack_pool,
detections)
# The dists is the list of distances of the detection with the tracks in strack_pool
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.tracked_thresh)
# The matches is the array for corresponding matches of the detection with the corresponding strack_pool
for itracked, idet in matches:
# itracked is the id of the track and idet is the detection
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
# If the track is active, add the detection to the track
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
# We have obtained a detection from a track which is not active,
# hence put the track in refind_stracks list
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
# None of the steps below happen if there are no undetected tracks.
""" Step 3: Second association, with IOU"""
detections = [detections[i] for i in u_detection]
# detections is now a list of the unmatched detections
r_tracked_stracks = []
# This is container for stracks which were tracked till the previous
# frame but no detection was found for it in the current frame.
for i in u_track:
if strack_pool[i].state == TrackState.Tracked:
r_tracked_stracks.append(strack_pool[i])
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.r_tracked_thresh)
# matches is the list of detections which matched with corresponding
# tracks by IOU distance method.
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
# Same process done for some unmatched detections, but now considering IOU_distance as measure
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
# If no detections are obtained for tracks (u_track), the tracks are added to lost_tracks list and are marked lost
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(
dists, thresh=self.unconfirmed_thresh)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_starcks.append(unconfirmed[itracked])
# The tracks which are yet not matched
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
# after all these confirmation steps, if a new detection is found, it is initialized for a new track
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.motion, self.frame_id)
activated_starcks.append(track)
""" Step 5: Update state"""
# If the tracks are lost for more frames than the threshold number, the tracks are removed.
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# Update the self.tracked_stracks and self.lost_stracks using the updates in this step.
self.tracked_stracks = [
t for t in self.tracked_stracks if t.state == TrackState.Tracked
]
self.tracked_stracks = joint_stracks(self.tracked_stracks,
activated_starcks)
self.tracked_stracks = joint_stracks(self.tracked_stracks,
refind_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
self.removed_stracks.extend(removed_stracks)
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [
track for track in self.tracked_stracks if track.is_activated
]
return output_stracks
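# Hedged usage sketch (not part of the deployment pipeline): push a couple of fake
# frames through the tracker. Shapes follow the update() docstring -- [N, 5]
# detections as tlbr + score and [N, 512] embeddings -- but the values are invented
# and running this still requires the ppdet modules imported above.
def _toy_tracking_loop(num_frames=3):
    tracker = JDETracker(conf_thres=0.4)
    emb = np.ones((1, 512), dtype='float32')
    for frame in range(num_frames):
        dets = np.array([[10.0 + frame, 20.0, 60.0 + frame, 120.0, 0.9]])  # tlbr + score
        online = tracker.update(dets, emb)
        print(frame, [round(float(t.score), 2) for t in online])
    return tracker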
| 46.53112
| 123
| 0.626984
|
975d2ca1cc0edde6ed956e770972e211890149d5
| 3,971
|
py
|
Python
|
testbed/test_common_request_handler/test_gevent.py
|
isaaczinda/opentracing-python
|
16cf899bf65be62f258084930bc1b0107c445c67
|
[
"Apache-2.0"
] | 795
|
2016-01-18T16:48:22.000Z
|
2022-02-22T13:36:57.000Z
|
testbed/test_common_request_handler/test_gevent.py
|
isaaczinda/opentracing-python
|
16cf899bf65be62f258084930bc1b0107c445c67
|
[
"Apache-2.0"
] | 121
|
2016-01-07T01:53:30.000Z
|
2022-03-24T11:07:04.000Z
|
testbed/test_common_request_handler/test_gevent.py
|
isaaczinda/opentracing-python
|
16cf899bf65be62f258084930bc1b0107c445c67
|
[
"Apache-2.0"
] | 124
|
2016-01-07T01:49:37.000Z
|
2021-12-30T01:11:54.000Z
|
from __future__ import print_function
import gevent
from opentracing.ext import tags
from opentracing.mocktracer import MockTracer
from opentracing.scope_managers.gevent import GeventScopeManager
from ..testcase import OpenTracingTestCase
from ..utils import get_logger, get_one_by_operation_name
from .request_handler import RequestHandler
logger = get_logger(__name__)
class Client(object):
def __init__(self, request_handler):
self.request_handler = request_handler
def send_task(self, message):
request_context = {}
def before_handler():
self.request_handler.before_request(message, request_context)
def after_handler():
self.request_handler.after_request(message, request_context)
gevent.spawn(before_handler).join()
gevent.spawn(after_handler).join()
return '%s::response' % message
def send(self, message):
return gevent.spawn(self.send_task, message)
def send_sync(self, message, timeout=5.0):
return gevent.spawn(self.send_task, message).get(timeout=timeout)
class TestGevent(OpenTracingTestCase):
"""
There is only one instance of 'RequestHandler' per 'Client'. Methods of
'RequestHandler' are executed in different greenlets, and no Span
propagation among them is done automatically.
    Therefore we cannot rely on the currently active span or activate spans
    across greenlets, so one issue here is setting the correct parent span.
"""
def setUp(self):
self.tracer = MockTracer(GeventScopeManager())
self.client = Client(RequestHandler(self.tracer))
def test_two_callbacks(self):
response_greenlet1 = gevent.spawn(self.client.send_task, 'message1')
response_greenlet2 = gevent.spawn(self.client.send_task, 'message2')
gevent.joinall([response_greenlet1, response_greenlet2])
self.assertEquals('message1::response', response_greenlet1.get())
self.assertEquals('message2::response', response_greenlet2.get())
spans = self.tracer.finished_spans()
self.assertEquals(len(spans), 2)
for span in spans:
self.assertEquals(span.tags.get(tags.SPAN_KIND, None),
tags.SPAN_KIND_RPC_CLIENT)
self.assertNotSameTrace(spans[0], spans[1])
self.assertIsNone(spans[0].parent_id)
self.assertIsNone(spans[1].parent_id)
def test_parent_not_picked(self):
"""Active parent should not be picked up by child."""
with self.tracer.start_active_span('parent'):
response = self.client.send_sync('no_parent')
self.assertEquals('no_parent::response', response)
spans = self.tracer.finished_spans()
self.assertEquals(len(spans), 2)
child_span = get_one_by_operation_name(spans, 'send')
self.assertIsNotNone(child_span)
parent_span = get_one_by_operation_name(spans, 'parent')
self.assertIsNotNone(parent_span)
# Here check that there is no parent-child relation.
self.assertIsNotChildOf(child_span, parent_span)
def test_bad_solution_to_set_parent(self):
"""Solution is bad because parent is per client
(we don't have better choice)"""
with self.tracer.start_active_span('parent') as scope:
client = Client(RequestHandler(self.tracer, scope.span.context))
response = client.send_sync('correct_parent')
self.assertEquals('correct_parent::response', response)
response = client.send_sync('wrong_parent')
self.assertEquals('wrong_parent::response', response)
spans = self.tracer.finished_spans()
self.assertEquals(len(spans), 3)
spans = sorted(spans, key=lambda x: x.start_time)
parent_span = get_one_by_operation_name(spans, 'parent')
self.assertIsNotNone(parent_span)
self.assertIsChildOf(spans[1], parent_span)
self.assertIsChildOf(spans[2], parent_span)
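# Hedged sketch, separate from the test cases above: because the scope manager does
# not carry the active span across greenlets, the parent context has to be handed to
# the spawned greenlet explicitly. All names below are illustrative only.
def _explicit_parent_demo():
    tracer = MockTracer(GeventScopeManager())
    def child(parent_context):
        with tracer.start_active_span('child', child_of=parent_context):
            pass
    with tracer.start_active_span('parent') as scope:
        gevent.spawn(child, scope.span.context).join()
    return tracer.finished_spans()  # the 'child' span reports 'parent' as its parent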
| 34.530435
| 76
| 0.694535
|
f2cdd8d69ef1b3539737ecbd0171171626aa896c
| 4,732
|
py
|
Python
|
sofascore/__init__.py
|
shimst3r/pysofa
|
67265a1966126817b722300aabd8ce1ef8a14284
|
[
"Apache-2.0"
] | null | null | null |
sofascore/__init__.py
|
shimst3r/pysofa
|
67265a1966126817b722300aabd8ce1ef8a14284
|
[
"Apache-2.0"
] | 1
|
2021-07-02T09:39:21.000Z
|
2021-07-02T09:49:53.000Z
|
sofascore/__init__.py
|
shimst3r/pysofa
|
67265a1966126817b722300aabd8ce1ef8a14284
|
[
"Apache-2.0"
] | 1
|
2021-07-02T09:40:56.000Z
|
2021-07-02T09:40:56.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2021 shimst3r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
sofascore computes the Sepsis-related Organ Failure Assessment (SOFA) score
according to Singer et al.:
https://doi.org/10.1001%2Fjama.2016.0287
"""
from typing import NamedTuple, Optional
__version__ = "1.2.0"
class Catecholamine(NamedTuple):
name: str
dosage: float
class Condition(NamedTuple):
mean_arterial_pressure: float
catecholamine: Optional[Catecholamine]
platelets_count: int
creatinine_level: float
bilirubin_level: float
glasgow_coma_scale: int
partial_pressure_of_oxygen: float
is_mechanically_ventilated: bool
def compute(condition: Condition) -> int:
cvs_score = compute_score_for_cardiovascular_system(
mean_arterial_pressure=condition.mean_arterial_pressure,
catecholamine=condition.catecholamine,
)
cg_score = compute_score_for_coagulation(platelets_count=condition.platelets_count)
kdny_score = compute_score_for_kidneys(creatinine_level=condition.creatinine_level)
livr_score = compute_score_for_liver(bilirubin_level=condition.bilirubin_level)
ns_score = compute_score_for_nervous_system(
glasgow_coma_scale=condition.glasgow_coma_scale
)
rs_score = compute_score_for_respiratory_system(
partial_pressure_of_oxygen=condition.partial_pressure_of_oxygen,
is_mechanically_ventilated=condition.is_mechanically_ventilated,
)
return cvs_score + cg_score + kdny_score + livr_score + ns_score + rs_score
def compute_score_for_cardiovascular_system(
mean_arterial_pressure: float, catecholamine: Optional[Catecholamine]
) -> int:
"""
Computes score based on mean arterial pressure or catecholamine therapy.
"""
if catecholamine:
if catecholamine.name == "dopamine":
if catecholamine.dosage <= 5:
return 2
if catecholamine.dosage < 15:
return 3
return 4
if catecholamine.name == "dobutamine":
return 2
if catecholamine.name in {"epinephrine", "norepinephrine"}:
if catecholamine.dosage <= 0.1:
return 3
return 4
if mean_arterial_pressure < 70:
return 1
return 0
def compute_score_for_coagulation(platelets_count: int) -> int:
"""
Computes score based on platelets count (unit is number per microliter).
"""
if platelets_count < 20_000:
return 4
if platelets_count < 50_000:
return 3
if platelets_count < 100_000:
return 2
if platelets_count < 150_000:
return 1
return 0
def compute_score_for_kidneys(creatinine_level: float) -> int:
"""Computes score based on Creatinine level (unit is mg/dl)."""
if creatinine_level >= 5.0:
return 4
if creatinine_level >= 3.5:
return 3
if creatinine_level >= 2.0:
return 2
if creatinine_level >= 1.2:
return 1
return 0
def compute_score_for_liver(bilirubin_level: float) -> int:
"""Computes score based on Bilirubin level (unit is mg/dl)."""
if bilirubin_level >= 12.0:
return 4
if bilirubin_level >= 6.0:
return 3
if bilirubin_level >= 2.0:
return 2
if bilirubin_level >= 1.2:
return 1
return 0
def compute_score_for_nervous_system(glasgow_coma_scale: int) -> int:
"""
Computes score based on Glasgow Coma Scale, see paper by Teasdale et al.:
https://doi.org/10.1016/S0140-6736(74)91639-0
"""
if glasgow_coma_scale < 6:
return 4
if glasgow_coma_scale < 10:
return 3
if glasgow_coma_scale < 13:
return 2
if glasgow_coma_scale < 15:
return 1
return 0
def compute_score_for_respiratory_system(
partial_pressure_of_oxygen: float, is_mechanically_ventilated: bool
) -> int:
"""Computes score based on PaO2 (unit is mmHg)."""
if partial_pressure_of_oxygen < 100 and is_mechanically_ventilated:
return 4
if partial_pressure_of_oxygen < 200 and is_mechanically_ventilated:
return 3
if partial_pressure_of_oxygen < 300:
return 2
if partial_pressure_of_oxygen < 400:
return 1
return 0
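# Hedged usage example; the clinical values below are invented purely to illustrate
# the thresholds defined above (they score 1 + 2 + 1 + 2 + 1 + 2 = 9).
def example_score() -> int:
    condition = Condition(
        mean_arterial_pressure=65.0,       # cardiovascular: 1 (MAP < 70, no catecholamine)
        catecholamine=None,
        platelets_count=90_000,            # coagulation: 2
        creatinine_level=1.5,              # kidneys: 1
        bilirubin_level=2.5,               # liver: 2
        glasgow_coma_scale=14,             # nervous system: 1
        partial_pressure_of_oxygen=250.0,  # respiratory: 2 (not ventilated)
        is_mechanically_ventilated=False,
    )
    return compute(condition)  # -> 9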
| 30.727273
| 87
| 0.698225
|
9d99b967807d6d4f3efcff047392492074c7d6e3
| 1,537
|
py
|
Python
|
tests/test_class_oelint_vars_fileextrapaths.py
|
Rahix/oelint-adv
|
b9dc381b181a8bdc7300bb5070f80bf90950efbd
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_vars_fileextrapaths.py
|
Rahix/oelint-adv
|
b9dc381b181a8bdc7300bb5070f80bf90950efbd
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_vars_fileextrapaths.py
|
Rahix/oelint-adv
|
b9dc381b181a8bdc7300bb5070f80bf90950efbd
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintVarsFilextrapaths(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.fileextrapaths'])
@pytest.mark.parametrize('occurance', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
FILESEXTRAPATHS_prepend := "${THISDIR}/file"
'''
},
{
'oelint_adv_test.bb':
'''
FILESEXTRAPATHS_append := "${THISDIR}/file"
'''
},
{
'oelint_adv_test.bb':
'''
FILESEXTRAPATHS += "${THISDIR}/file"
'''
}
],
)
def test_bad(self, input, id, occurance):
self.check_for_id(self._create_args(input), id, occurance)
@pytest.mark.parametrize('id', ['oelint.vars.fileextrapaths'])
@pytest.mark.parametrize('occurance', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bbappend':
'''
FILESEXTRAPATHS_prepend := "${THISDIR}/file"
'''
},
{
'oelint_adv_test.bbappend':
'''
FILESEXTRAPATHS_append := "${THISDIR}/file"
'''
},
],
)
def test_good(self, input, id, occurance):
self.check_for_id(self._create_args(input), id, occurance)
| 26.5
| 66
| 0.506181
|
dc5305729d377c93f00785fff930682a2e9b8a67
| 23,202
|
py
|
Python
|
src/users/migrations/0002_profile_tz.py
|
Onnryo/bug-tracker
|
0f60ca28b2395e9141e4a7ba5e2f9ee0dfbf12cd
|
[
"MIT"
] | 1
|
2021-06-18T14:56:08.000Z
|
2021-06-18T14:56:08.000Z
|
src/users/migrations/0002_profile_tz.py
|
Onnryo/bug-tracker
|
0f60ca28b2395e9141e4a7ba5e2f9ee0dfbf12cd
|
[
"MIT"
] | 20
|
2021-06-17T08:40:22.000Z
|
2021-07-07T03:58:24.000Z
|
src/users/migrations/0002_profile_tz.py
|
Onnryo/bug-tracker
|
0f60ca28b2395e9141e4a7ba5e2f9ee0dfbf12cd
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-07-03 06:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='tz',
field=models.CharField(choices=[('Africa/Abidjan', 'Africa/Abidjan'), ('Africa/Accra', 'Africa/Accra'), ('Africa/Addis_Ababa', 'Africa/Addis_Ababa'), ('Africa/Algiers', 'Africa/Algiers'), ('Africa/Asmara', 'Africa/Asmara'), ('Africa/Asmera', 'Africa/Asmera'), ('Africa/Bamako', 'Africa/Bamako'), ('Africa/Bangui', 'Africa/Bangui'), ('Africa/Banjul', 'Africa/Banjul'), ('Africa/Bissau', 'Africa/Bissau'), ('Africa/Blantyre', 'Africa/Blantyre'), ('Africa/Brazzaville', 'Africa/Brazzaville'), ('Africa/Bujumbura', 'Africa/Bujumbura'), ('Africa/Cairo', 'Africa/Cairo'), ('Africa/Casablanca', 'Africa/Casablanca'), ('Africa/Ceuta', 'Africa/Ceuta'), ('Africa/Conakry', 'Africa/Conakry'), ('Africa/Dakar', 'Africa/Dakar'), ('Africa/Dar_es_Salaam', 'Africa/Dar_es_Salaam'), ('Africa/Djibouti', 'Africa/Djibouti'), ('Africa/Douala', 'Africa/Douala'), ('Africa/El_Aaiun', 'Africa/El_Aaiun'), ('Africa/Freetown', 'Africa/Freetown'), ('Africa/Gaborone', 'Africa/Gaborone'), ('Africa/Harare', 'Africa/Harare'), ('Africa/Johannesburg', 'Africa/Johannesburg'), ('Africa/Juba', 'Africa/Juba'), ('Africa/Kampala', 'Africa/Kampala'), ('Africa/Khartoum', 'Africa/Khartoum'), ('Africa/Kigali', 'Africa/Kigali'), ('Africa/Kinshasa', 'Africa/Kinshasa'), ('Africa/Lagos', 'Africa/Lagos'), ('Africa/Libreville', 'Africa/Libreville'), ('Africa/Lome', 'Africa/Lome'), ('Africa/Luanda', 'Africa/Luanda'), ('Africa/Lubumbashi', 'Africa/Lubumbashi'), ('Africa/Lusaka', 'Africa/Lusaka'), ('Africa/Malabo', 'Africa/Malabo'), ('Africa/Maputo', 'Africa/Maputo'), ('Africa/Maseru', 'Africa/Maseru'), ('Africa/Mbabane', 'Africa/Mbabane'), ('Africa/Mogadishu', 'Africa/Mogadishu'), ('Africa/Monrovia', 'Africa/Monrovia'), ('Africa/Nairobi', 'Africa/Nairobi'), ('Africa/Ndjamena', 'Africa/Ndjamena'), ('Africa/Niamey', 'Africa/Niamey'), ('Africa/Nouakchott', 'Africa/Nouakchott'), ('Africa/Ouagadougou', 'Africa/Ouagadougou'), ('Africa/Porto-Novo', 'Africa/Porto-Novo'), ('Africa/Sao_Tome', 'Africa/Sao_Tome'), ('Africa/Timbuktu', 'Africa/Timbuktu'), ('Africa/Tripoli', 'Africa/Tripoli'), ('Africa/Tunis', 'Africa/Tunis'), ('Africa/Windhoek', 'Africa/Windhoek'), ('America/Adak', 'America/Adak'), ('America/Anchorage', 'America/Anchorage'), ('America/Anguilla', 'America/Anguilla'), ('America/Antigua', 'America/Antigua'), ('America/Araguaina', 'America/Araguaina'), ('America/Argentina/Buenos_Aires', 'America/Argentina/Buenos_Aires'), ('America/Argentina/Catamarca', 'America/Argentina/Catamarca'), ('America/Argentina/ComodRivadavia', 'America/Argentina/ComodRivadavia'), ('America/Argentina/Cordoba', 'America/Argentina/Cordoba'), ('America/Argentina/Jujuy', 'America/Argentina/Jujuy'), ('America/Argentina/La_Rioja', 'America/Argentina/La_Rioja'), ('America/Argentina/Mendoza', 'America/Argentina/Mendoza'), ('America/Argentina/Rio_Gallegos', 'America/Argentina/Rio_Gallegos'), ('America/Argentina/Salta', 'America/Argentina/Salta'), ('America/Argentina/San_Juan', 'America/Argentina/San_Juan'), ('America/Argentina/San_Luis', 'America/Argentina/San_Luis'), ('America/Argentina/Tucuman', 'America/Argentina/Tucuman'), ('America/Argentina/Ushuaia', 'America/Argentina/Ushuaia'), ('America/Aruba', 'America/Aruba'), ('America/Asuncion', 'America/Asuncion'), ('America/Atikokan', 'America/Atikokan'), ('America/Atka', 'America/Atka'), ('America/Bahia', 'America/Bahia'), ('America/Bahia_Banderas', 'America/Bahia_Banderas'), ('America/Barbados', 'America/Barbados'), ('America/Belem', 'America/Belem'), ('America/Belize', 'America/Belize'), ('America/Blanc-Sablon', 
'America/Blanc-Sablon'), ('America/Boa_Vista', 'America/Boa_Vista'), ('America/Bogota', 'America/Bogota'), ('America/Boise', 'America/Boise'), ('America/Buenos_Aires', 'America/Buenos_Aires'), ('America/Cambridge_Bay', 'America/Cambridge_Bay'), ('America/Campo_Grande', 'America/Campo_Grande'), ('America/Cancun', 'America/Cancun'), ('America/Caracas', 'America/Caracas'), ('America/Catamarca', 'America/Catamarca'), ('America/Cayenne', 'America/Cayenne'), ('America/Cayman', 'America/Cayman'), ('America/Chicago', 'America/Chicago'), ('America/Chihuahua', 'America/Chihuahua'), ('America/Coral_Harbour', 'America/Coral_Harbour'), ('America/Cordoba', 'America/Cordoba'), ('America/Costa_Rica', 'America/Costa_Rica'), ('America/Creston', 'America/Creston'), ('America/Cuiaba', 'America/Cuiaba'), ('America/Curacao', 'America/Curacao'), ('America/Danmarkshavn', 'America/Danmarkshavn'), ('America/Dawson', 'America/Dawson'), ('America/Dawson_Creek', 'America/Dawson_Creek'), ('America/Denver', 'America/Denver'), ('America/Detroit', 'America/Detroit'), ('America/Dominica', 'America/Dominica'), ('America/Edmonton', 'America/Edmonton'), ('America/Eirunepe', 'America/Eirunepe'), ('America/El_Salvador', 'America/El_Salvador'), ('America/Ensenada', 'America/Ensenada'), ('America/Fort_Nelson', 'America/Fort_Nelson'), ('America/Fort_Wayne', 'America/Fort_Wayne'), ('America/Fortaleza', 'America/Fortaleza'), ('America/Glace_Bay', 'America/Glace_Bay'), ('America/Godthab', 'America/Godthab'), ('America/Goose_Bay', 'America/Goose_Bay'), ('America/Grand_Turk', 'America/Grand_Turk'), ('America/Grenada', 'America/Grenada'), ('America/Guadeloupe', 'America/Guadeloupe'), ('America/Guatemala', 'America/Guatemala'), ('America/Guayaquil', 'America/Guayaquil'), ('America/Guyana', 'America/Guyana'), ('America/Halifax', 'America/Halifax'), ('America/Havana', 'America/Havana'), ('America/Hermosillo', 'America/Hermosillo'), ('America/Indiana/Indianapolis', 'America/Indiana/Indianapolis'), ('America/Indiana/Knox', 'America/Indiana/Knox'), ('America/Indiana/Marengo', 'America/Indiana/Marengo'), ('America/Indiana/Petersburg', 'America/Indiana/Petersburg'), ('America/Indiana/Tell_City', 'America/Indiana/Tell_City'), ('America/Indiana/Vevay', 'America/Indiana/Vevay'), ('America/Indiana/Vincennes', 'America/Indiana/Vincennes'), ('America/Indiana/Winamac', 'America/Indiana/Winamac'), ('America/Indianapolis', 'America/Indianapolis'), ('America/Inuvik', 'America/Inuvik'), ('America/Iqaluit', 'America/Iqaluit'), ('America/Jamaica', 'America/Jamaica'), ('America/Jujuy', 'America/Jujuy'), ('America/Juneau', 'America/Juneau'), ('America/Kentucky/Louisville', 'America/Kentucky/Louisville'), ('America/Kentucky/Monticello', 'America/Kentucky/Monticello'), ('America/Knox_IN', 'America/Knox_IN'), ('America/Kralendijk', 'America/Kralendijk'), ('America/La_Paz', 'America/La_Paz'), ('America/Lima', 'America/Lima'), ('America/Los_Angeles', 'America/Los_Angeles'), ('America/Louisville', 'America/Louisville'), ('America/Lower_Princes', 'America/Lower_Princes'), ('America/Maceio', 'America/Maceio'), ('America/Managua', 'America/Managua'), ('America/Manaus', 'America/Manaus'), ('America/Marigot', 'America/Marigot'), ('America/Martinique', 'America/Martinique'), ('America/Matamoros', 'America/Matamoros'), ('America/Mazatlan', 'America/Mazatlan'), ('America/Mendoza', 'America/Mendoza'), ('America/Menominee', 'America/Menominee'), ('America/Merida', 'America/Merida'), ('America/Metlakatla', 'America/Metlakatla'), ('America/Mexico_City', 'America/Mexico_City'), 
('America/Miquelon', 'America/Miquelon'), ('America/Moncton', 'America/Moncton'), ('America/Monterrey', 'America/Monterrey'), ('America/Montevideo', 'America/Montevideo'), ('America/Montreal', 'America/Montreal'), ('America/Montserrat', 'America/Montserrat'), ('America/Nassau', 'America/Nassau'), ('America/New_York', 'America/New_York'), ('America/Nipigon', 'America/Nipigon'), ('America/Nome', 'America/Nome'), ('America/Noronha', 'America/Noronha'), ('America/North_Dakota/Beulah', 'America/North_Dakota/Beulah'), ('America/North_Dakota/Center', 'America/North_Dakota/Center'), ('America/North_Dakota/New_Salem', 'America/North_Dakota/New_Salem'), ('America/Nuuk', 'America/Nuuk'), ('America/Ojinaga', 'America/Ojinaga'), ('America/Panama', 'America/Panama'), ('America/Pangnirtung', 'America/Pangnirtung'), ('America/Paramaribo', 'America/Paramaribo'), ('America/Phoenix', 'America/Phoenix'), ('America/Port-au-Prince', 'America/Port-au-Prince'), ('America/Port_of_Spain', 'America/Port_of_Spain'), ('America/Porto_Acre', 'America/Porto_Acre'), ('America/Porto_Velho', 'America/Porto_Velho'), ('America/Puerto_Rico', 'America/Puerto_Rico'), ('America/Punta_Arenas', 'America/Punta_Arenas'), ('America/Rainy_River', 'America/Rainy_River'), ('America/Rankin_Inlet', 'America/Rankin_Inlet'), ('America/Recife', 'America/Recife'), ('America/Regina', 'America/Regina'), ('America/Resolute', 'America/Resolute'), ('America/Rio_Branco', 'America/Rio_Branco'), ('America/Rosario', 'America/Rosario'), ('America/Santa_Isabel', 'America/Santa_Isabel'), ('America/Santarem', 'America/Santarem'), ('America/Santiago', 'America/Santiago'), ('America/Santo_Domingo', 'America/Santo_Domingo'), ('America/Sao_Paulo', 'America/Sao_Paulo'), ('America/Scoresbysund', 'America/Scoresbysund'), ('America/Shiprock', 'America/Shiprock'), ('America/Sitka', 'America/Sitka'), ('America/St_Barthelemy', 'America/St_Barthelemy'), ('America/St_Johns', 'America/St_Johns'), ('America/St_Kitts', 'America/St_Kitts'), ('America/St_Lucia', 'America/St_Lucia'), ('America/St_Thomas', 'America/St_Thomas'), ('America/St_Vincent', 'America/St_Vincent'), ('America/Swift_Current', 'America/Swift_Current'), ('America/Tegucigalpa', 'America/Tegucigalpa'), ('America/Thule', 'America/Thule'), ('America/Thunder_Bay', 'America/Thunder_Bay'), ('America/Tijuana', 'America/Tijuana'), ('America/Toronto', 'America/Toronto'), ('America/Tortola', 'America/Tortola'), ('America/Vancouver', 'America/Vancouver'), ('America/Virgin', 'America/Virgin'), ('America/Whitehorse', 'America/Whitehorse'), ('America/Winnipeg', 'America/Winnipeg'), ('America/Yakutat', 'America/Yakutat'), ('America/Yellowknife', 'America/Yellowknife'), ('Antarctica/Casey', 'Antarctica/Casey'), ('Antarctica/Davis', 'Antarctica/Davis'), ('Antarctica/DumontDUrville', 'Antarctica/DumontDUrville'), ('Antarctica/Macquarie', 'Antarctica/Macquarie'), ('Antarctica/Mawson', 'Antarctica/Mawson'), ('Antarctica/McMurdo', 'Antarctica/McMurdo'), ('Antarctica/Palmer', 'Antarctica/Palmer'), ('Antarctica/Rothera', 'Antarctica/Rothera'), ('Antarctica/South_Pole', 'Antarctica/South_Pole'), ('Antarctica/Syowa', 'Antarctica/Syowa'), ('Antarctica/Troll', 'Antarctica/Troll'), ('Antarctica/Vostok', 'Antarctica/Vostok'), ('Arctic/Longyearbyen', 'Arctic/Longyearbyen'), ('Asia/Aden', 'Asia/Aden'), ('Asia/Almaty', 'Asia/Almaty'), ('Asia/Amman', 'Asia/Amman'), ('Asia/Anadyr', 'Asia/Anadyr'), ('Asia/Aqtau', 'Asia/Aqtau'), ('Asia/Aqtobe', 'Asia/Aqtobe'), ('Asia/Ashgabat', 'Asia/Ashgabat'), ('Asia/Ashkhabad', 'Asia/Ashkhabad'), 
('Asia/Atyrau', 'Asia/Atyrau'), ('Asia/Baghdad', 'Asia/Baghdad'), ('Asia/Bahrain', 'Asia/Bahrain'), ('Asia/Baku', 'Asia/Baku'), ('Asia/Bangkok', 'Asia/Bangkok'), ('Asia/Barnaul', 'Asia/Barnaul'), ('Asia/Beirut', 'Asia/Beirut'), ('Asia/Bishkek', 'Asia/Bishkek'), ('Asia/Brunei', 'Asia/Brunei'), ('Asia/Calcutta', 'Asia/Calcutta'), ('Asia/Chita', 'Asia/Chita'), ('Asia/Choibalsan', 'Asia/Choibalsan'), ('Asia/Chongqing', 'Asia/Chongqing'), ('Asia/Chungking', 'Asia/Chungking'), ('Asia/Colombo', 'Asia/Colombo'), ('Asia/Dacca', 'Asia/Dacca'), ('Asia/Damascus', 'Asia/Damascus'), ('Asia/Dhaka', 'Asia/Dhaka'), ('Asia/Dili', 'Asia/Dili'), ('Asia/Dubai', 'Asia/Dubai'), ('Asia/Dushanbe', 'Asia/Dushanbe'), ('Asia/Famagusta', 'Asia/Famagusta'), ('Asia/Gaza', 'Asia/Gaza'), ('Asia/Harbin', 'Asia/Harbin'), ('Asia/Hebron', 'Asia/Hebron'), ('Asia/Ho_Chi_Minh', 'Asia/Ho_Chi_Minh'), ('Asia/Hong_Kong', 'Asia/Hong_Kong'), ('Asia/Hovd', 'Asia/Hovd'), ('Asia/Irkutsk', 'Asia/Irkutsk'), ('Asia/Istanbul', 'Asia/Istanbul'), ('Asia/Jakarta', 'Asia/Jakarta'), ('Asia/Jayapura', 'Asia/Jayapura'), ('Asia/Jerusalem', 'Asia/Jerusalem'), ('Asia/Kabul', 'Asia/Kabul'), ('Asia/Kamchatka', 'Asia/Kamchatka'), ('Asia/Karachi', 'Asia/Karachi'), ('Asia/Kashgar', 'Asia/Kashgar'), ('Asia/Kathmandu', 'Asia/Kathmandu'), ('Asia/Katmandu', 'Asia/Katmandu'), ('Asia/Khandyga', 'Asia/Khandyga'), ('Asia/Kolkata', 'Asia/Kolkata'), ('Asia/Krasnoyarsk', 'Asia/Krasnoyarsk'), ('Asia/Kuala_Lumpur', 'Asia/Kuala_Lumpur'), ('Asia/Kuching', 'Asia/Kuching'), ('Asia/Kuwait', 'Asia/Kuwait'), ('Asia/Macao', 'Asia/Macao'), ('Asia/Macau', 'Asia/Macau'), ('Asia/Magadan', 'Asia/Magadan'), ('Asia/Makassar', 'Asia/Makassar'), ('Asia/Manila', 'Asia/Manila'), ('Asia/Muscat', 'Asia/Muscat'), ('Asia/Nicosia', 'Asia/Nicosia'), ('Asia/Novokuznetsk', 'Asia/Novokuznetsk'), ('Asia/Novosibirsk', 'Asia/Novosibirsk'), ('Asia/Omsk', 'Asia/Omsk'), ('Asia/Oral', 'Asia/Oral'), ('Asia/Phnom_Penh', 'Asia/Phnom_Penh'), ('Asia/Pontianak', 'Asia/Pontianak'), ('Asia/Pyongyang', 'Asia/Pyongyang'), ('Asia/Qatar', 'Asia/Qatar'), ('Asia/Qostanay', 'Asia/Qostanay'), ('Asia/Qyzylorda', 'Asia/Qyzylorda'), ('Asia/Rangoon', 'Asia/Rangoon'), ('Asia/Riyadh', 'Asia/Riyadh'), ('Asia/Saigon', 'Asia/Saigon'), ('Asia/Sakhalin', 'Asia/Sakhalin'), ('Asia/Samarkand', 'Asia/Samarkand'), ('Asia/Seoul', 'Asia/Seoul'), ('Asia/Shanghai', 'Asia/Shanghai'), ('Asia/Singapore', 'Asia/Singapore'), ('Asia/Srednekolymsk', 'Asia/Srednekolymsk'), ('Asia/Taipei', 'Asia/Taipei'), ('Asia/Tashkent', 'Asia/Tashkent'), ('Asia/Tbilisi', 'Asia/Tbilisi'), ('Asia/Tehran', 'Asia/Tehran'), ('Asia/Tel_Aviv', 'Asia/Tel_Aviv'), ('Asia/Thimbu', 'Asia/Thimbu'), ('Asia/Thimphu', 'Asia/Thimphu'), ('Asia/Tokyo', 'Asia/Tokyo'), ('Asia/Tomsk', 'Asia/Tomsk'), ('Asia/Ujung_Pandang', 'Asia/Ujung_Pandang'), ('Asia/Ulaanbaatar', 'Asia/Ulaanbaatar'), ('Asia/Ulan_Bator', 'Asia/Ulan_Bator'), ('Asia/Urumqi', 'Asia/Urumqi'), ('Asia/Ust-Nera', 'Asia/Ust-Nera'), ('Asia/Vientiane', 'Asia/Vientiane'), ('Asia/Vladivostok', 'Asia/Vladivostok'), ('Asia/Yakutsk', 'Asia/Yakutsk'), ('Asia/Yangon', 'Asia/Yangon'), ('Asia/Yekaterinburg', 'Asia/Yekaterinburg'), ('Asia/Yerevan', 'Asia/Yerevan'), ('Atlantic/Azores', 'Atlantic/Azores'), ('Atlantic/Bermuda', 'Atlantic/Bermuda'), ('Atlantic/Canary', 'Atlantic/Canary'), ('Atlantic/Cape_Verde', 'Atlantic/Cape_Verde'), ('Atlantic/Faeroe', 'Atlantic/Faeroe'), ('Atlantic/Faroe', 'Atlantic/Faroe'), ('Atlantic/Jan_Mayen', 'Atlantic/Jan_Mayen'), ('Atlantic/Madeira', 'Atlantic/Madeira'), ('Atlantic/Reykjavik', 
'Atlantic/Reykjavik'), ('Atlantic/South_Georgia', 'Atlantic/South_Georgia'), ('Atlantic/St_Helena', 'Atlantic/St_Helena'), ('Atlantic/Stanley', 'Atlantic/Stanley'), ('Australia/ACT', 'Australia/ACT'), ('Australia/Adelaide', 'Australia/Adelaide'), ('Australia/Brisbane', 'Australia/Brisbane'), ('Australia/Broken_Hill', 'Australia/Broken_Hill'), ('Australia/Canberra', 'Australia/Canberra'), ('Australia/Currie', 'Australia/Currie'), ('Australia/Darwin', 'Australia/Darwin'), ('Australia/Eucla', 'Australia/Eucla'), ('Australia/Hobart', 'Australia/Hobart'), ('Australia/LHI', 'Australia/LHI'), ('Australia/Lindeman', 'Australia/Lindeman'), ('Australia/Lord_Howe', 'Australia/Lord_Howe'), ('Australia/Melbourne', 'Australia/Melbourne'), ('Australia/NSW', 'Australia/NSW'), ('Australia/North', 'Australia/North'), ('Australia/Perth', 'Australia/Perth'), ('Australia/Queensland', 'Australia/Queensland'), ('Australia/South', 'Australia/South'), ('Australia/Sydney', 'Australia/Sydney'), ('Australia/Tasmania', 'Australia/Tasmania'), ('Australia/Victoria', 'Australia/Victoria'), ('Australia/West', 'Australia/West'), ('Australia/Yancowinna', 'Australia/Yancowinna'), ('Brazil/Acre', 'Brazil/Acre'), ('Brazil/DeNoronha', 'Brazil/DeNoronha'), ('Brazil/East', 'Brazil/East'), ('Brazil/West', 'Brazil/West'), ('CET', 'CET'), ('CST6CDT', 'CST6CDT'), ('Canada/Atlantic', 'Canada/Atlantic'), ('Canada/Central', 'Canada/Central'), ('Canada/Eastern', 'Canada/Eastern'), ('Canada/Mountain', 'Canada/Mountain'), ('Canada/Newfoundland', 'Canada/Newfoundland'), ('Canada/Pacific', 'Canada/Pacific'), ('Canada/Saskatchewan', 'Canada/Saskatchewan'), ('Canada/Yukon', 'Canada/Yukon'), ('Chile/Continental', 'Chile/Continental'), ('Chile/EasterIsland', 'Chile/EasterIsland'), ('Cuba', 'Cuba'), ('EET', 'EET'), ('EST', 'EST'), ('EST5EDT', 'EST5EDT'), ('Egypt', 'Egypt'), ('Eire', 'Eire'), ('Etc/GMT', 'Etc/GMT'), ('Etc/GMT+0', 'Etc/GMT+0'), ('Etc/GMT+1', 'Etc/GMT+1'), ('Etc/GMT+10', 'Etc/GMT+10'), ('Etc/GMT+11', 'Etc/GMT+11'), ('Etc/GMT+12', 'Etc/GMT+12'), ('Etc/GMT+2', 'Etc/GMT+2'), ('Etc/GMT+3', 'Etc/GMT+3'), ('Etc/GMT+4', 'Etc/GMT+4'), ('Etc/GMT+5', 'Etc/GMT+5'), ('Etc/GMT+6', 'Etc/GMT+6'), ('Etc/GMT+7', 'Etc/GMT+7'), ('Etc/GMT+8', 'Etc/GMT+8'), ('Etc/GMT+9', 'Etc/GMT+9'), ('Etc/GMT-0', 'Etc/GMT-0'), ('Etc/GMT-1', 'Etc/GMT-1'), ('Etc/GMT-10', 'Etc/GMT-10'), ('Etc/GMT-11', 'Etc/GMT-11'), ('Etc/GMT-12', 'Etc/GMT-12'), ('Etc/GMT-13', 'Etc/GMT-13'), ('Etc/GMT-14', 'Etc/GMT-14'), ('Etc/GMT-2', 'Etc/GMT-2'), ('Etc/GMT-3', 'Etc/GMT-3'), ('Etc/GMT-4', 'Etc/GMT-4'), ('Etc/GMT-5', 'Etc/GMT-5'), ('Etc/GMT-6', 'Etc/GMT-6'), ('Etc/GMT-7', 'Etc/GMT-7'), ('Etc/GMT-8', 'Etc/GMT-8'), ('Etc/GMT-9', 'Etc/GMT-9'), ('Etc/GMT0', 'Etc/GMT0'), ('Etc/Greenwich', 'Etc/Greenwich'), ('Etc/UCT', 'Etc/UCT'), ('Etc/UTC', 'Etc/UTC'), ('Etc/Universal', 'Etc/Universal'), ('Etc/Zulu', 'Etc/Zulu'), ('Europe/Amsterdam', 'Europe/Amsterdam'), ('Europe/Andorra', 'Europe/Andorra'), ('Europe/Astrakhan', 'Europe/Astrakhan'), ('Europe/Athens', 'Europe/Athens'), ('Europe/Belfast', 'Europe/Belfast'), ('Europe/Belgrade', 'Europe/Belgrade'), ('Europe/Berlin', 'Europe/Berlin'), ('Europe/Bratislava', 'Europe/Bratislava'), ('Europe/Brussels', 'Europe/Brussels'), ('Europe/Bucharest', 'Europe/Bucharest'), ('Europe/Budapest', 'Europe/Budapest'), ('Europe/Busingen', 'Europe/Busingen'), ('Europe/Chisinau', 'Europe/Chisinau'), ('Europe/Copenhagen', 'Europe/Copenhagen'), ('Europe/Dublin', 'Europe/Dublin'), ('Europe/Gibraltar', 'Europe/Gibraltar'), ('Europe/Guernsey', 'Europe/Guernsey'), 
('Europe/Helsinki', 'Europe/Helsinki'), ('Europe/Isle_of_Man', 'Europe/Isle_of_Man'), ('Europe/Istanbul', 'Europe/Istanbul'), ('Europe/Jersey', 'Europe/Jersey'), ('Europe/Kaliningrad', 'Europe/Kaliningrad'), ('Europe/Kiev', 'Europe/Kiev'), ('Europe/Kirov', 'Europe/Kirov'), ('Europe/Lisbon', 'Europe/Lisbon'), ('Europe/Ljubljana', 'Europe/Ljubljana'), ('Europe/London', 'Europe/London'), ('Europe/Luxembourg', 'Europe/Luxembourg'), ('Europe/Madrid', 'Europe/Madrid'), ('Europe/Malta', 'Europe/Malta'), ('Europe/Mariehamn', 'Europe/Mariehamn'), ('Europe/Minsk', 'Europe/Minsk'), ('Europe/Monaco', 'Europe/Monaco'), ('Europe/Moscow', 'Europe/Moscow'), ('Europe/Nicosia', 'Europe/Nicosia'), ('Europe/Oslo', 'Europe/Oslo'), ('Europe/Paris', 'Europe/Paris'), ('Europe/Podgorica', 'Europe/Podgorica'), ('Europe/Prague', 'Europe/Prague'), ('Europe/Riga', 'Europe/Riga'), ('Europe/Rome', 'Europe/Rome'), ('Europe/Samara', 'Europe/Samara'), ('Europe/San_Marino', 'Europe/San_Marino'), ('Europe/Sarajevo', 'Europe/Sarajevo'), ('Europe/Saratov', 'Europe/Saratov'), ('Europe/Simferopol', 'Europe/Simferopol'), ('Europe/Skopje', 'Europe/Skopje'), ('Europe/Sofia', 'Europe/Sofia'), ('Europe/Stockholm', 'Europe/Stockholm'), ('Europe/Tallinn', 'Europe/Tallinn'), ('Europe/Tirane', 'Europe/Tirane'), ('Europe/Tiraspol', 'Europe/Tiraspol'), ('Europe/Ulyanovsk', 'Europe/Ulyanovsk'), ('Europe/Uzhgorod', 'Europe/Uzhgorod'), ('Europe/Vaduz', 'Europe/Vaduz'), ('Europe/Vatican', 'Europe/Vatican'), ('Europe/Vienna', 'Europe/Vienna'), ('Europe/Vilnius', 'Europe/Vilnius'), ('Europe/Volgograd', 'Europe/Volgograd'), ('Europe/Warsaw', 'Europe/Warsaw'), ('Europe/Zagreb', 'Europe/Zagreb'), ('Europe/Zaporozhye', 'Europe/Zaporozhye'), ('Europe/Zurich', 'Europe/Zurich'), ('GB', 'GB'), ('GB-Eire', 'GB-Eire'), ('GMT', 'GMT'), ('GMT+0', 'GMT+0'), ('GMT-0', 'GMT-0'), ('GMT0', 'GMT0'), ('Greenwich', 'Greenwich'), ('HST', 'HST'), ('Hongkong', 'Hongkong'), ('Iceland', 'Iceland'), ('Indian/Antananarivo', 'Indian/Antananarivo'), ('Indian/Chagos', 'Indian/Chagos'), ('Indian/Christmas', 'Indian/Christmas'), ('Indian/Cocos', 'Indian/Cocos'), ('Indian/Comoro', 'Indian/Comoro'), ('Indian/Kerguelen', 'Indian/Kerguelen'), ('Indian/Mahe', 'Indian/Mahe'), ('Indian/Maldives', 'Indian/Maldives'), ('Indian/Mauritius', 'Indian/Mauritius'), ('Indian/Mayotte', 'Indian/Mayotte'), ('Indian/Reunion', 'Indian/Reunion'), ('Iran', 'Iran'), ('Israel', 'Israel'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Kwajalein', 'Kwajalein'), ('Libya', 'Libya'), ('MET', 'MET'), ('MST', 'MST'), ('MST7MDT', 'MST7MDT'), ('Mexico/BajaNorte', 'Mexico/BajaNorte'), ('Mexico/BajaSur', 'Mexico/BajaSur'), ('Mexico/General', 'Mexico/General'), ('NZ', 'NZ'), ('NZ-CHAT', 'NZ-CHAT'), ('Navajo', 'Navajo'), ('PRC', 'PRC'), ('PST8PDT', 'PST8PDT'), ('Pacific/Apia', 'Pacific/Apia'), ('Pacific/Auckland', 'Pacific/Auckland'), ('Pacific/Bougainville', 'Pacific/Bougainville'), ('Pacific/Chatham', 'Pacific/Chatham'), ('Pacific/Chuuk', 'Pacific/Chuuk'), ('Pacific/Easter', 'Pacific/Easter'), ('Pacific/Efate', 'Pacific/Efate'), ('Pacific/Enderbury', 'Pacific/Enderbury'), ('Pacific/Fakaofo', 'Pacific/Fakaofo'), ('Pacific/Fiji', 'Pacific/Fiji'), ('Pacific/Funafuti', 'Pacific/Funafuti'), ('Pacific/Galapagos', 'Pacific/Galapagos'), ('Pacific/Gambier', 'Pacific/Gambier'), ('Pacific/Guadalcanal', 'Pacific/Guadalcanal'), ('Pacific/Guam', 'Pacific/Guam'), ('Pacific/Honolulu', 'Pacific/Honolulu'), ('Pacific/Johnston', 'Pacific/Johnston'), ('Pacific/Kiritimati', 'Pacific/Kiritimati'), ('Pacific/Kosrae', 
'Pacific/Kosrae'), ('Pacific/Kwajalein', 'Pacific/Kwajalein'), ('Pacific/Majuro', 'Pacific/Majuro'), ('Pacific/Marquesas', 'Pacific/Marquesas'), ('Pacific/Midway', 'Pacific/Midway'), ('Pacific/Nauru', 'Pacific/Nauru'), ('Pacific/Niue', 'Pacific/Niue'), ('Pacific/Norfolk', 'Pacific/Norfolk'), ('Pacific/Noumea', 'Pacific/Noumea'), ('Pacific/Pago_Pago', 'Pacific/Pago_Pago'), ('Pacific/Palau', 'Pacific/Palau'), ('Pacific/Pitcairn', 'Pacific/Pitcairn'), ('Pacific/Pohnpei', 'Pacific/Pohnpei'), ('Pacific/Ponape', 'Pacific/Ponape'), ('Pacific/Port_Moresby', 'Pacific/Port_Moresby'), ('Pacific/Rarotonga', 'Pacific/Rarotonga'), ('Pacific/Saipan', 'Pacific/Saipan'), ('Pacific/Samoa', 'Pacific/Samoa'), ('Pacific/Tahiti', 'Pacific/Tahiti'), ('Pacific/Tarawa', 'Pacific/Tarawa'), ('Pacific/Tongatapu', 'Pacific/Tongatapu'), ('Pacific/Truk', 'Pacific/Truk'), ('Pacific/Wake', 'Pacific/Wake'), ('Pacific/Wallis', 'Pacific/Wallis'), ('Pacific/Yap', 'Pacific/Yap'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('ROC', 'ROC'), ('ROK', 'ROK'), ('Singapore', 'Singapore'), ('Turkey', 'Turkey'), ('UCT', 'UCT'), ('US/Alaska', 'US/Alaska'), ('US/Aleutian', 'US/Aleutian'), ('US/Arizona', 'US/Arizona'), ('US/Central', 'US/Central'), ('US/East-Indiana', 'US/East-Indiana'), ('US/Eastern', 'US/Eastern'), ('US/Hawaii', 'US/Hawaii'), ('US/Indiana-Starke', 'US/Indiana-Starke'), ('US/Michigan', 'US/Michigan'), ('US/Mountain', 'US/Mountain'), ('US/Pacific', 'US/Pacific'), ('US/Samoa', 'US/Samoa'), ('UTC', 'UTC'), ('Universal', 'Universal'), ('W-SU', 'W-SU'), ('WET', 'WET'), ('Zulu', 'Zulu')], default="('UTC', 'UTC')", max_length=64),
),
]
| 1,221.157895
| 22,884
| 0.684036
|
69b989649b13fe82ebd0dcdde6be60792736ada9
| 4,296
|
py
|
Python
|
connect_and_launch.py
|
404CoderNotFound/Aternos-On-Discord-Replit
|
b9df013260ae0f38500dc008985a8735658ad9d4
|
[
"MIT"
] | null | null | null |
connect_and_launch.py
|
404CoderNotFound/Aternos-On-Discord-Replit
|
b9df013260ae0f38500dc008985a8735658ad9d4
|
[
"MIT"
] | null | null | null |
connect_and_launch.py
|
404CoderNotFound/Aternos-On-Discord-Replit
|
b9df013260ae0f38500dc008985a8735658ad9d4
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementNotInteractableException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from helper import can_fire, can_fire_async
import asyncio
import time
from dotenv import load_dotenv
import os
from chromedriver_py import binary_path
if os.path.exists(os.path.relpath(".env")):
load_dotenv()
USER = os.getenv('USERNAME_C')
PASSWORD = os.getenv('PASSWORD_C')
URL = "https://aternos.org/server/"
SERVER_STATUS_URI = URL
connected = False
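# Configure a headless Chrome instance that ignores certificate/SSL errors.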
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--ignore-certificate-errors')
options.add_argument("--allow-insecure-localhost")
#options.add_argument("user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36 OPR/68.0.3618.125")
driver = webdriver.Chrome(options=options, executable_path=binary_path)
async def start_server():
""" Starts the server by clicking on the start button.
The try except part tries to find the confirmation button, and if it
doesn't, it continues to loop until the confirm button is clicked."""
# if not connected:
# connect_account()
await asyncio.sleep(5)
element = driver.find_element_by_xpath("/html/body/div[20]/div/div/div/div[3]/div[2]/div[2]")
element.click()
await asyncio.sleep(3)
element = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[4]/div[1]')
element.click()
await asyncio.sleep(10)
element = driver.find_element_by_xpath('/html/body/div[2]/main/div/div/div/main/div/a[2]')
element.click()
state = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[3]/div[1]/div/span[2]/span')
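    # "Wachten in de wachtrij" is Dutch for "Waiting in the queue" (the Aternos status label).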
while state.text == "Wachten in de wachtrij":
state = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[3]/div[1]/div/span[2]/span')
try:
element = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[4]/div[6]')
element.click()
except ElementNotInteractableException as e:
pass
driver.close()
@can_fire
def connect_account():
""" Connects to the accounts through a headless chrome tab so we don't
have to do it every time we want to start or stop the server."""
driver.get(URL)
element = driver.find_element_by_xpath('//*[@id="user"]')
element.send_keys(USER)
element = driver.find_element_by_xpath('//*[@id="password"]')
element.send_keys(PASSWORD)
element = driver.find_element_by_xpath('//*[@id="login"]')
element.click()
connected = True
time.sleep(10)
@can_fire
def get_status():
# Returns the status of the server as a string
driver.get(SERVER_STATUS_URI)
time.sleep(2)
if not connected:
connect_account()
time.sleep(2)
element = driver.find_element_by_xpath('/html/body/div/main/section/div/div[2]/div/div[1]')
element.click()
time.sleep(1)
element = driver.find_element_by_class_name('statuslabel-label')
print(element.text)
return element.text
@can_fire
def get_number_of_players():
# Returns the number of players as a string
driver.get(SERVER_STATUS_URI)
if not connected:
connect_account()
time.sleep(5)
element = driver.find_element_by_xpath('/html/body/div/main/section/div/div[2]/div/div[1]')
element.click()
time.sleep(1)
number_of_players = WebDriverWait(driver, 360).until(ec.presence_of_element_located((By.XPATH, '/html/body/div[2]/main/section/div[3]/div[5]/div[2]/div[1]/div/div[2]/div/span')))
return number_of_players.text
async def stop_server():
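    """ Stops the server: opens the server page and clicks the stop button. """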
if not connected:
connect_account()
driver.get(URL)
element = driver.find_element_by_xpath("/html/body/div/main/section/div/div[2]/div[1]/div[1]")
element.click()
await asyncio.sleep(3)
element = driver.find_element_by_xpath('//*[@id="stop"]')
element.click()
| 38.702703
| 182
| 0.704842
|
659bd563467f2030fdfc3d73b341c4e78418d501
| 28,245
|
py
|
Python
|
tests/test_sailor/test__base/test_fetch.py
|
isabella232/project-sailor
|
bd09fb8cbeaa6c95cc011f93bd830bc863b1c768
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sailor/test__base/test_fetch.py
|
isabella232/project-sailor
|
bd09fb8cbeaa6c95cc011f93bd830bc863b1c768
|
[
"Apache-2.0"
] | 1
|
2022-02-27T18:58:37.000Z
|
2022-02-27T21:14:29.000Z
|
tests/test_sailor/test__base/test_fetch.py
|
isabella232/project-sailor
|
bd09fb8cbeaa6c95cc011f93bd830bc863b1c768
|
[
"Apache-2.0"
] | null | null | null |
from typing import Iterable
import pytest
from sailor import _base
from sailor._base.fetch import (
fetch_data, apply_filters_post_request, parse_filter_parameters, _unify_filters, _compose_queries,
_strip_quote_marks)
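# Tests for sailor._base.fetch: filter unification and parsing, query composition,
# data fetching and post-request filtering.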
@pytest.mark.filterwarnings('ignore:Following parameters are not in our terminology')
class TestQueryParsers:
@pytest.mark.parametrize('test_description,value,expected_values', [
('single string', 'the value', "'the value'"),
('list of strings', ['the value1', 'value2'], ["'the value1'", "'value2'"]),
('null value', None, 'null'),
('single integer', 7, "'7'"),
('list of integers', [3, 6, 1], ["'3'", "'6'", "'1'"]),
('single float', 3.14, "'3.14'"),
('list of floats', [3.4, 4.5], ["'3.4'", "'4.5'"]),
('mixed type list', ['value 1', 18., 'value2', 5, None], ["'value 1'", "'18.0'", "'value2'", "'5'", 'null']),
])
def test_unify_filters_only_equality_known_fields(self, value, expected_values, test_description):
expected_filters = [('filtered_term', 'eq', expected_values)]
field_map = {'filtered_term': _base.MasterDataField('filtered_term', 'filtered_term')}
filters = _unify_filters({'filtered_term': value}, None, field_map)
assert filters == expected_filters
    # Values of unknown fields are never modified; the user must supply them correctly.
@pytest.mark.parametrize('test_description,value,expected_values', [
('single value', 'the value', 'the value'), # this includes the null value
('quoted value single-quote', "'the value'", "'the value'"),
('quoted value double-quote', '"value"', '"value"'),
('list of values', ['value1', 'value2'], ["value1", "value2"]),
('single integer', 7, "7"),
('list of integers', [3, 6, 1], ["3", "6", "1"]),
('single float', 3.14, "3.14"),
('list of floats', [3.4, 4.5], ["3.4", "4.5"]),
('mixed type list', ['null', 18., "'value2'", 5], ["null", "18.0", "'value2'", "5"]),
])
def test_unify_filters_only_equality_unknown_fields(self, value, expected_values, test_description):
expected_filters = [('filtered_term', 'eq', expected_values)]
filters = _unify_filters({'filtered_term': value}, None, None)
assert filters == expected_filters
    # Values of known fields are put through the default query transformer => single-quote everything except 'null'.
@pytest.mark.parametrize('test_description,value,expected_value', [
('quoted value single-quote', "'the value'", "'the value'"),
('quoted value double-quote', '"the value"', "'the value'"),
('unquoted value', 'the value', "'the value'"),
('empty quotes', '""', "''"),
('empty quotes', "''", "''"),
('other string', "datetimeoffset'2020-01-01'", "'datetimeoffset'2020-01-01''"), # nonsensical example
('null value', 'null', 'null'),
('single integer', 7, "'7'"),
('single float', 3.14, "'3.14'"),
])
def test_unify_filters_only_extended_known_fields(self, value, expected_value, test_description):
expected_filters = [('filtered_term', 'eq', expected_value)]
field_map = {'filtered_term': _base.MasterDataField('filtered_term', 'filtered_term')}
filters = _unify_filters(None, ['filtered_term == {}'.format(value)], field_map)
assert filters == expected_filters
@pytest.mark.parametrize('test_description,value,expected_value', [
('quoted value single-quote', "'the value'", "'the value'"),
('quoted value double-quote', '"the value"', '"the value"'),
('other string', "datetimeoffset'2020-01-01'", "datetimeoffset'2020-01-01'"),
('null value', 'null', 'null'),
('single integer', 7, '7'),
('single float', 3.14, '3.14'),
])
def test_unify_filters_only_extended_unknown_fields(self, value, expected_value, test_description):
expected_filters = [('filtered_term', 'eq', expected_value)]
filters = _unify_filters(None, ['filtered_term == {}'.format(value)], None)
assert filters == expected_filters
@pytest.mark.parametrize('test_description,equality_value,extended_value', [
('quoted value single-quote', 'the value', "'the value'"),
('quoted value double-quote', 'the value', '"the value"'),
('unquoted value double-quote', 'the value', 'the value'),
('quoted empty value', '', "''"),
('quoted empty value', '', '""'),
('other string', "datetimeoffset'2020-01-01'", "datetimeoffset'2020-01-01'"),
('null value', None, 'null'),
('single integer', 7, '7'),
('single float', 3.14, '3.14'),
])
def test_extended_equals_equality_different_types_known_fields(self, equality_value, extended_value,
test_description):
field_map = {'filtered_term': _base.MasterDataField('filtered_term', 'filtered_term')}
equality_filters = _unify_filters({'filtered_term': equality_value}, None, field_map)
extended_filters = _unify_filters(None, ['filtered_term == {}'.format(extended_value)], field_map)
assert equality_filters == extended_filters
@pytest.mark.parametrize('test_description,equality_value,extended_value', [
('quoted value single-quote', "'the value'", "'the value'"),
('quoted value double-quote', '"the value"', '"the value"'),
('other string', "datetimeoffset'2020-01-01'", "datetimeoffset'2020-01-01'"),
('null value', 'null', 'null'),
('single integer', 7, '7'),
('single float', 3.14, '3.14'),
])
def test_extended_equals_equality_different_types_unknown_fields(self, equality_value, extended_value,
test_description):
equality_filters = _unify_filters({'filtered_term': equality_value}, None, None)
extended_filters = _unify_filters(None, ['filtered_term == {}'.format(extended_value)], None)
assert equality_filters == extended_filters
@pytest.mark.parametrize('filter,odata_expression', [
('==', 'eq'), ('!=', 'ne'), ('<=', 'le'), ('>=', 'ge'), ('>', 'gt'), ('<', 'lt')
])
def test_unify_filters_extended_filter_types_unknown_fields(self, filter, odata_expression):
expected_filters = [('filtered_term', odata_expression, "value")]
filters = _unify_filters(None, ['filtered_term {} value'.format(filter)], None)
assert filters == expected_filters
@pytest.mark.parametrize('filter_term', [
'a == b', 'a==b', 'a ==b', 'a == b'
])
def test_unify_filters_different_extended_formatting_unquoted_known_fields(self, filter_term):
filters = _unify_filters(None, [filter_term], {'a': _base.MasterDataField('a', 'A')})
assert filters == [('A', 'eq', "'b'")]
@pytest.mark.parametrize('filter_term', [
'a == b', 'a==b', 'a ==b', 'a == b'
])
def test_unify_filters_different_extended_formatting_unquoted_unknown_fields(self, filter_term):
filters = _unify_filters(None, [filter_term], None)
assert filters == [('a', 'eq', 'b')]
@pytest.mark.parametrize('filter_term', [
"a == 'b'", "a=='b'", "a =='b'", "a == 'b'"
])
def test_unify_filters_different_extended_formatting_single_quoted_unknown_field(self, filter_term):
filters = _unify_filters(None, [filter_term], None)
assert filters == [('a', 'eq', "'b'")]
@pytest.mark.parametrize('filter_term', [
'a == "b"', 'a=="b"', 'a =="b"', 'a == "b"'
])
def test_unify_filters_different_extended_formatting_double_quoted_unknown_field(self, filter_term):
filters = _unify_filters(None, [filter_term], None)
assert filters == [('a', 'eq', '"b"')]
def test_unify_filters_property_mapping_kwargs_key_field(self):
filters = _unify_filters({'my_term': 'some_value'}, None, {'my_term': _base.MasterDataField('my_term',
'their_term')})
assert filters[0][0] == 'their_term'
def test_unify_filters_property_mapping_extended_key_field(self):
filters = _unify_filters(None, ['my_term == "foo"'], {'my_term': _base.MasterDataField('my_term',
'their_term')})
assert filters[0][0] == 'their_term'
def test_unify_filters_property_mapping_value_is_a_known_field(self):
filters = _unify_filters(None, ['some_field == other_field'],
{'some_field': _base.MasterDataField('some_field', 'SomeField'),
'other_field': _base.MasterDataField('other_field', 'OtherField')})
assert filters == [('SomeField', 'eq', 'OtherField')]
@pytest.mark.parametrize('testdescription,equality_filters,expected_unbreakable,expected_breakable', [
('no args returns empty', {}, [], []),
('single valued filters are unbreakable',
{'location': 'Paris', 'name': 'test'}, ["location eq 'Paris'", "name eq 'test'"], []),
('multi valued filters are breakable',
{'location': ['Paris', 'London']}, [], [["location eq 'Paris'", "location eq 'London'"]]),
('single and multi are correctly broken',
{'location': ['Paris', 'London'], 'name': 'test'}, ["name eq 'test'"],
[["location eq 'Paris'", "location eq 'London'"]]),
])
def test_parse_filter_parameters_equality_filters_known_fields(self, equality_filters, expected_unbreakable,
expected_breakable, testdescription):
field_map = {'location': _base.MasterDataField('location', 'location'),
'name': _base.MasterDataField('name', 'name')}
actual_unbreakable, actual_breakable = parse_filter_parameters(equality_filters, None, field_map)
assert actual_unbreakable == expected_unbreakable
assert actual_breakable == expected_breakable
@pytest.mark.parametrize('testdescription,equality_filters,expected_unbreakable,expected_breakable', [
('no args returns empty', {}, [], []),
('single valued filters are unbreakable',
{'location': "'Paris'", 'name': "'test'"}, ["location eq 'Paris'", "name eq 'test'"], []),
('multi valued filters are breakable',
{'location': ["'Paris'", "'London'"]}, [], [["location eq 'Paris'", "location eq 'London'"]]),
('single and multi are correctly broken',
{'location': ["'Paris'", "'London'"], 'name': "'test'"}, ["name eq 'test'"], [
["location eq 'Paris'", "location eq 'London'"]]),
])
def test_parse_filter_parameters_equality_filters_unknown_fields(self, equality_filters, expected_unbreakable,
expected_breakable, testdescription):
actual_unbreakable, actual_breakable = parse_filter_parameters(equality_filters, None, None)
assert actual_unbreakable == expected_unbreakable
assert actual_breakable == expected_breakable
@pytest.mark.parametrize('testdescription,extended_filters,expected_unbreakable', [
('no args returns empty', [], []),
('single param',
["startDate > '2020-01-01'"],
["startDate gt '2020-01-01'"]),
('multiple params',
["startDate > '2020-01-01'", "endDate < '2020-02-01'"],
["startDate gt '2020-01-01'", "endDate lt '2020-02-01'"]),
])
def test_parse_filter_parameters_extended_filters_are_unbreakable(self, extended_filters, expected_unbreakable,
testdescription):
actual_unbreakable, actual_breakable = parse_filter_parameters(extended_filters=extended_filters)
assert actual_unbreakable == expected_unbreakable
assert actual_breakable == []
def test_parse_filter_parameters_with_combined_filters_unknown_fields(self):
equality_filters = {'location': ["'Paris'", "'London'"]}
extended_filters = ["startDate > '2020-01-01'", "endDate < '2020-02-01'"]
actual_unbreakable, actual_breakable = \
parse_filter_parameters(equality_filters=equality_filters, extended_filters=extended_filters)
assert actual_unbreakable == ["startDate gt '2020-01-01'", "endDate lt '2020-02-01'"]
assert actual_breakable == [["location eq 'Paris'", "location eq 'London'"]]
def test_parse_filter_parameters_with_property_mapping(self):
equality_filters = {'location_name': ['Paris', 'London'], 'serial_number': 1234}
extended_filters = ["start_date > '2020-01-01'"]
field_map = {'location_name': _base.MasterDataField('location_name', 'location'),
'serial_number': _base.MasterDataField('serial_number', 'serialNumber',
query_transformer=lambda x: str(x)),
'start_date': _base.MasterDataField('start_date', 'startDate')}
actual_unbreakable, actual_breakable = \
parse_filter_parameters(equality_filters, extended_filters, field_map)
assert actual_unbreakable == ["serialNumber eq 1234", "startDate gt '2020-01-01'"]
assert actual_breakable == [["location eq 'Paris'", "location eq 'London'"]]
@pytest.mark.parametrize('testdescr,equality_filters,extended_filters', [
('equality', {'field_str': 'PaloAlto', 'field_str_qt': 'Walldorf', 'field_int': 1234},
[]),
('extended_w/_quotes', {},
["field_str == 'PaloAlto'", "field_str_qt == 'Walldorf'", "field_int == '1234'"]),
('extended_w/_double_quotes', {},
["field_str == \"PaloAlto\"", "field_str_qt == \"Walldorf\"", "field_int == \"1234\""]),
('extended_w/o_quotes', {},
["field_str == PaloAlto", "field_str_qt == Walldorf", "field_int == 1234"]),
])
@pytest.mark.filterwarnings('ignore:Trying to parse non-timezone-aware timestamp, assuming UTC.')
def test_parse_filter_parameters_with_query_transformer(self, equality_filters, extended_filters, testdescr):
expected_unbreakable = ["FieldStr eq 'PaloAlto'", "FieldStrQT eq 'PREFIX_Walldorf'",
"FieldInt eq 1234"]
def str_add_prefix(x):
return "'PREFIX_" + str(x) + "'"
field_map = {'field_str': _base.MasterDataField('field_str', 'FieldStr',),
'field_str_qt': _base.MasterDataField('field_str_qt', 'FieldStrQT',
query_transformer=str_add_prefix),
'field_int': _base.MasterDataField('field_int', 'FieldInt',
query_transformer=lambda x: int(x))
}
actual_unbreakable, actual_breakable = \
parse_filter_parameters(equality_filters, extended_filters, field_map)
assert actual_unbreakable == expected_unbreakable
assert actual_breakable == []
def test_parse_filter_parameters_with_query_transformer_equality_list(self):
equality_filters = {'location_name': ['PaloAlto', 'Walldorf']}
extended_filters = []
expected_breakable = [["location eq 'PREFIX_PaloAlto'", "location eq 'PREFIX_Walldorf'"]]
def add_prefix(x):
return "'PREFIX_" + str(x) + "'"
field_map = {'location_name': _base.MasterDataField('location_name', 'location', query_transformer=add_prefix)}
actual_unbreakable, actual_breakable = \
parse_filter_parameters(equality_filters, extended_filters, field_map)
assert actual_unbreakable == []
assert actual_breakable == expected_breakable
@pytest.mark.parametrize('testdescription,equality_filters,extended_filters,expected_ids', [
('without filters as None', None, None, ['indicator_id1', 'indicator_id2', 'indicator_id3']),
('without filters as dict and list', {}, [], ['indicator_id1', 'indicator_id2', 'indicator_id3']),
('equality filters', dict(type='yellow', dimension='zero'), None, ['indicator_id1']),
('equality filter list', dict(type=['yellow', 'brown']), None, ['indicator_id1', 'indicator_id2', 'indicator_id3']),
('extended filters', None, ['categoryID > "aa"'], ['indicator_id3']),
('both filters', dict(type='brown'), ['categoryID > "a"'], ['indicator_id3']),
('both filters yields empty result', dict(type='yellow'), ['categoryID > "aa"'], []),
])
@pytest.mark.filterwarnings('ignore:Following parameters are not in our terminology')
def test_apply_filters_post_request_filtering(equality_filters, extended_filters, expected_ids, testdescription):
data = [{'id': 'indicator_id1', 'type': 'yellow', 'dimension': 'zero', 'categoryID': 'aa'},
{'id': 'indicator_id2', 'type': 'yellow', 'dimension': 'three', 'categoryID': 'aa'},
{'id': 'indicator_id3', 'type': 'brown', 'dimension': 'three', 'categoryID': 'aaaa'}]
actual = apply_filters_post_request(data, equality_filters, extended_filters, field_map=None)
assert [item['id'] for item in actual] == expected_ids
def test_apply_filters_post_request_property_mapping():
data = [{'propertyId': 'indicator_id1', 'indicatorType': 'yellow', 'categoryID': 'aa'},
{'propertyId': 'indicator_id2', 'indicatorType': 'yellow', 'categoryID': 'aa'},
{'propertyId': 'indicator_id3', 'indicatorType': 'brown', 'categoryID': 'aaaa'}]
field_map = {'type': _base.masterdata.MasterDataField('type', 'indicatorType'),
'template_id': _base.masterdata.MasterDataField('template_id', 'categoryID')}
equality_filters = dict(type='yellow')
extended_filters = ['template_id > "a"']
expected_result = [{'propertyId': 'indicator_id1', 'indicatorType': 'yellow', 'categoryID': 'aa'},
{'propertyId': 'indicator_id2', 'indicatorType': 'yellow', 'categoryID': 'aa'}]
actual = apply_filters_post_request(data, equality_filters, extended_filters, field_map)
assert actual == expected_result
class TestComposeQueries:
@pytest.mark.parametrize('testdescription,unbreakable_filters,breakable_filters,expected', [
('empty input returns empty filters',
[], [], []),
('unbreakable filters are combined with "and"',
["name eq 'test'", "location eq 'Paris'"], [],
["name eq 'test' and location eq 'Paris'"]),
('breakable filters are combined with "or"',
[], [["location eq 'Paris'", "location eq 'London'"]],
["(location eq 'Paris' or location eq 'London')"]),
('multiple breakable filters are connected with "and"',
[], [["testFac eq 'abcCorp'", "testFac eq '123pumps'"], ["location eq 'Paris'", "location eq 'London'"]],
["(testFac eq 'abcCorp' or testFac eq '123pumps') and (location eq 'Paris' or location eq 'London')"]),
('un- and breakable filters are connected with "and"',
["name eq 'test'"], [["location eq 'Paris'", "location eq 'London'"]],
["name eq 'test' and (location eq 'Paris' or location eq 'London')"])
])
def test_regular_cases(self, unbreakable_filters, breakable_filters, expected, testdescription):
actual = _compose_queries(unbreakable_filters, breakable_filters)
assert actual == expected
# the correctness of the test depends on what is configured as the max_filter_length in _compose_queries
def test_too_many_filters_are_split_verify_split_start_end(self):
unbreakable_filters = []
breakable_filters = [[f"manufacturer eq '{'abcCorp' if i % 2 == 0 else '123pumps'}_{i}'" for i in range(100)],
["location eq 'Paris'", "location eq 'London'"]]
expected_start_of_1st_filter = "(location eq 'Paris' or location eq 'London') and (manufacturer eq 'abcCorp_0'"
expected_start_of_2nd_filter = "(location eq 'Paris' or location eq 'London') and (manufacturer eq "
expected_end_of_2nd_filter = "manufacturer eq '123pumps_99')"
actual = _compose_queries(unbreakable_filters, breakable_filters)
assert len(actual) == 2
assert actual[0].startswith(expected_start_of_1st_filter)
assert actual[1].startswith(expected_start_of_2nd_filter)
assert actual[1].endswith(expected_end_of_2nd_filter)
# the correctness of the test depends on what is configured as the max_filter_length in _compose_queries
def test_too_many_filters_are_split_verify_split_borders(self):
unbreakable_filters = []
breakable_filters = [[f"manufacturer eq '{'abcCorp' if i % 2 == 0 else '123pumps'}_{i}'" for i in range(100)],
["location eq 'Paris'", "location eq 'London'"]]
expected_end_of_1st_filter = "manufacturer eq '123pumps_59')"
expected_start_of_2nd_filter = "(location eq 'Paris' or location eq 'London') and (manufacturer eq 'abcCorp_60'"
actual = _compose_queries(unbreakable_filters, breakable_filters)
assert len(actual) == 2
assert actual[0].endswith(expected_end_of_1st_filter)
assert actual[1].startswith(expected_start_of_2nd_filter)
# the correctness of the test depends on what is configured as the max_filter_length in _compose_queries
def test_too_many_filters_are_split_all_filters_present(self):
unbreakable_filters = ["name eq 'test'"]
breakable_filters = [[f"manufacturer eq '{'abcCorp' if i % 2 == 0 else '123pumps'}_{i}'" for i in range(100)],
["location eq 'Paris'", "location eq 'London'"]]
actual = _compose_queries(unbreakable_filters, breakable_filters)
big_filter_string = ''.join(actual)
assert unbreakable_filters[0] in big_filter_string
for sublist in breakable_filters:
for item in sublist:
assert item in big_filter_string
@pytest.mark.filterwarnings('ignore:Following parameters are not in our terminology')
def test_compose_queries_many_filters_break_by_one(self):
# for this test _compose_queries field name and field_values
# must be chosen such that exactly two queries result, and the
# second one gets just *one* entry. This assumes max_filter_length
# in _compose_queries is 2000
field_value = '144B9A523FB54D00B574365605C6E343'
field_values = [field_value] * 36
filters = parse_filter_parameters({'query_field_name': field_values})
queries = _compose_queries(*filters)
assert len(queries) == 2
assert queries[0].count(field_value) == 35
assert queries[1].count(field_value) == 1
assert not queries[1].startswith(' and ')
class TestFetchData:
@staticmethod
def generic_response_handler(result, endpoint_data):
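        # Minimal handler passed to fetch_data in these tests: it simply accumulates the endpoint data.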
result.extend(endpoint_data)
return result
@pytest.mark.filterwarnings('ignore::sailor.utils.utils.DataNotFoundWarning')
@pytest.mark.parametrize('testdesc,unbreakable_filters,breakable_filters,remote_return', [
('no filters - single return', [], [], {'a': 'dict'}),
('filters - single return', ['a gt b'], [['c eq 1']], {'a': 'dict'}),
('no filters - list return', [], [], ['result']),
('filters - list return', ['a gt b'], [['c eq 1']], ['result']),
])
def test_returns_iterable(self, mock_request, unbreakable_filters, breakable_filters, remote_return, testdesc):
mock_request.return_value = remote_return
actual = fetch_data('dummy_client_name', self.generic_response_handler,
'', unbreakable_filters, breakable_filters)
assert not issubclass(actual.__class__, str)
assert isinstance(actual, Iterable)
def test_no_filters_makes_remote_call_without_query_params(self, mock_request):
mock_request.return_value = ['result']
unbreakable_filters = []
breakable_filters = []
expected_params = {'$format': 'json'}
actual = fetch_data('dummy_client_name', self.generic_response_handler,
'', unbreakable_filters, breakable_filters)
mock_request.assert_called_once_with('GET', '', params=expected_params)
assert actual == ['result']
@pytest.mark.filterwarnings('ignore::sailor.utils.utils.DataNotFoundWarning')
def test_adds_filter_parameter_on_call(self, mock_request):
unbreakable_filters = ["location eq 'Walldorf'"]
breakable_filters = [["manufacturer eq 'abcCorp'"]]
expected_parameters = {'$filter': "location eq 'Walldorf' and (manufacturer eq 'abcCorp')",
'$format': 'json'}
fetch_data('dummy_client_name', self.generic_response_handler,
'', unbreakable_filters, breakable_filters)
mock_request.assert_called_once_with('GET', '', params=expected_parameters)
def test_multiple_calls_aggregated_result(self, mock_request):
unbreakable_filters = ["location eq 'Walldorf'"]
# causes _compose_queries to generate two filter strings
breakable_filters = [["manufacturer eq 'abcCorp'"] * 100]
mock_request.side_effect = [["result1-1", "result1-2"], ["result2-1"]]
expected_result = ["result1-1", "result1-2", "result2-1"]
actual = fetch_data('dummy_client_name', self.generic_response_handler,
'', unbreakable_filters, breakable_filters)
assert actual == expected_result
@pytest.mark.parametrize('input,expected', [
("'correctly-quoted'", 'correctly-quoted'),
('"correctly-quoted"', 'correctly-quoted'),
('"correctly-quoted-with-quotes"in-the"-middle"', 'correctly-quoted-with-quotes"in-the"-middle'),
('"wrong\'', '"wrong\''),
('"wrong', '"wrong'),
('just a string', 'just a string'),
('some-quotes"in-the"-middle', 'some-quotes"in-the"-middle'),
])
def test_strip_quote_marks(input, expected):
actual = _strip_quote_marks(input)
assert actual == expected
@pytest.mark.parametrize('testdescription,equality_filters,expected_ids', [
('numeric', dict(numeric_property=0), ['indicator_id1']),
('numeric list', dict(numeric_property=[0, 4.4]), ['indicator_id1', 'indicator_id2']),
('true boolean', dict(boolean_property=True), ['indicator_id1']),
('int boolean', dict(boolean_property=0), ['indicator_id2']),
# TODO: post request filtering based on result type
# ('string boolean', dict(boolean_property='1'), ['indicator_id1']),
# ('timestamp string', dict(timestamp_property='2020-01-01 11:22:33'), ['indicator_id1']),
])
def test_post_request_filtering_data_types_equality(equality_filters, expected_ids, testdescription):
data = [{'id': 'indicator_id1', 'numeric_property': 0, 'boolean_property': True,
'timestamp_property': '2020-01-01T11:22:33+00:00'},
{'id': 'indicator_id2', 'numeric_property': 4.4, 'boolean_property': False,
'timestamp_property': '2020-01-01T11:22:33+02:00'}]
field_map = {
'id': _base.MasterDataField('filtered_term', 'filtered_term'),
'numeric_property': _base.MasterDataField('numeric_property', 'numeric_property',
query_transformer=_base.masterdata._qt_double),
'boolean_property': _base.MasterDataField('boolean_property', 'boolean_property',
query_transformer=_base.masterdata._qt_boolean_int_string),
'timestamp_property': _base.MasterDataField('timestamp_property', 'timestamp_property',
query_transformer=_base.masterdata._qt_timestamp)
}
actual = apply_filters_post_request(data, equality_filters, [], field_map=field_map)
assert [item['id'] for item in actual] == expected_ids
| 54.421965
| 120
| 0.630589
|
2a1380e61678d626032b724f9257f0935d28c652
| 19,453
|
py
|
Python
|
src/multi_agent/behaviour/memory.py
|
xenakal/Simulation_Interactions
|
4e428bb70445ba7fd2d102facdd18c5def4542b7
|
[
"MIT"
] | 2
|
2021-03-24T07:26:40.000Z
|
2022-02-05T23:07:27.000Z
|
src/multi_agent/behaviour/memory.py
|
xenakal/Simulation_Interactions
|
4e428bb70445ba7fd2d102facdd18c5def4542b7
|
[
"MIT"
] | null | null | null |
src/multi_agent/behaviour/memory.py
|
xenakal/Simulation_Interactions
|
4e428bb70445ba7fd2d102facdd18c5def4542b7
|
[
"MIT"
] | null | null | null |
import copy
from src.multi_agent.elements.target import TargetRepresentation
from src.multi_agent.tools.estimation import MultipleOwnerMemories, SingleOwnerMemories, is_in_list_TargetEstimator, \
ItemEstimation
from src.multi_agent.prediction.kalmanPrediction import KalmanPrediction
from src.my_utils.my_IO.IO_data import *
import numpy as np
class CombineDataChoice:
DATA_MEASURED_ONLY_SELF = "data measured only self"
DATA_KALMAN = "data_kalman"
DATA_PREDICTION_T_PLUS_1 = "data_preditiction_1"
DATA_PREDICTION_T_PLUS_2 = "data_preditiction_2"
class Memory:
"""
description:
        Class with multiple arrays that store information about targets.
    params:
        agentID -- agent to which the memory is associated
        current_time -- time to be updated so that the memory stays in sync with the room.
        nTime -- number of memories over time that should be stored (not used yet)
        (Agent_Target_TargetEstimator) memory_all_agent -- list of all information the agent has (ie its own readings +
                                                           readings other agents sent). The function combine_data
uses memory_all_agent to create memory_agent.
(Target_TargetEstimator) memory_agent -- list of combined information for each target.
Now, it only contains the information the agent himself has gathered.
                                                       In the future, a different approach may be implemented, where some
                                                       sort of mean is used to combine the information all agents
                                                       exchange.
([KalmanPrediction, ...]) predictors -- KalmanPrediction objects tracking the detected targets.
(Target_TargetEstimator) best_estimation -- best estimation of actual position (ie removing the noise) of
each target.
(Target_TargetEstimator) predictions_order_X -- predictions at order X.
"""
def __init__(self, agent_id, nTime=20, current_time=0):
self.id = agent_id
self.time = current_time
self.nTime = nTime
self.memory_all_agent_from_agent = MultipleOwnerMemories()
self.memory_agent_from_agent = SingleOwnerMemories(agent_id)
self.memory_all_agent_from_target = MultipleOwnerMemories()
self.memory_agent_from_target = SingleOwnerMemories(agent_id)
self.memory_measured_from_target = SingleOwnerMemories(agent_id)
self.memory_local_kf = SingleOwnerMemories(agent_id)
self.memory_best_estimations_from_target = SingleOwnerMemories(agent_id)
self.memory_predictions_order_1_from_target = SingleOwnerMemories(agent_id)
self.memory_predictions_order_2_from_target = SingleOwnerMemories(agent_id)
self.predictors = []
"Logger to keep track of every send and received messages"
self.log_memory = create_logger(constants.ResultsPath.LOG_MEMORY, "Memory", self.id)
self.log_memory_local_kf = create_logger(constants.ResultsPath.LOG_MEMORY, "LOCAL_KF", self.id)
self.log_memory_global_kf = create_logger(constants.ResultsPath.LOG_MEMORY, "GLOBAL_KF", self.id)
self.log_all_kf_actions = create_logger(constants.ResultsPath.LOG_MEMORY, "ALL_KF", self.id)
def add_create_target_estimator(self, time_from_estimation, agent_id, agent_signature, item):
"""
:description
Creates an estimator if it doesn't exist and adds it to the memory_all_agent list
"""
self.log_memory.info("Add memory, from agent : " + str(agent_id) + " - target " + str(item.id))
# update "global info" list
self.memory_all_agent_from_target.add_create_itemEstimation(time_from_estimation, agent_id, agent_signature,
item)
# add predictor if doesn't exist yet
if not self.exists_predictor_for_target(item.id):
self.create_predictor_for_target(agent_id, item.id, item.xc, item.yc, item.vx, item.vy, item.ax,
item.ay, time_from_estimation)
# inform predictor of new measurement
target_predictor = self.get_target_predictor(item.id)
state = [item.xc, item.yc, item.vx, item.vy, item.ax, item.ay]
target_predictor.add_measurement(state, time_from_estimation)
(new_estimate_current_pos, new_var) = target_predictor.get_current_position()
kalman_target_representation = TargetRepresentation(item.id, new_estimate_current_pos[0],
new_estimate_current_pos[1], new_estimate_current_pos[2],
new_estimate_current_pos[3], 0, 0,
item.radius, item.target_type, item.color)
self.update_best_estimation(time_from_estimation, agent_id, agent_signature, kalman_target_representation)
self.update_predictions_lists(time_from_estimation, agent_id, agent_signature, item)
def add_target_estimator(self, estimator):
self.log_memory.info(
"Add memory, from agent : " + str(estimator.owner_id) + " - target " + str(estimator.item.id))
self.memory_all_agent_from_target.add_itemEstimation(estimator)
def add_create_agent_estimator_from_agent(self, time_from_estimation, agent, item):
"""
:description
Creates an estimator if it doesn't exist and adds it to the memory_all_agent list
"""
self.log_memory.info("Add memory, from agent : " + str(agent.id) + " - agent " + str(item.id))
# update "global info" list
self.memory_all_agent_from_agent.add_create_itemEstimation(time_from_estimation, agent.id,
agent.signature, item)
def add_agent_estimator(self, estimator):
self.log_memory.info(
"Add memory, from agent : " + str(estimator.owner_id) + " - agent " + str(estimator.item.id))
self.memory_all_agent_from_agent.add_itemEstimation(estimator)
def set_current_time(self, current_time):
self.time = current_time
self.memory_all_agent_from_target.current_time = current_time
self.memory_measured_from_target.current_time = current_time
def combine_data_agentCam(self):
"""
:description
Creates the memory_agent list from the memory_all_agent list
:param
(int) choice -- method used to create memory_agent list
=1 -> simply keep info read by agent whose memory this is
:note
In the future, a different method could be added, where the information from all agents is combined (for
example using some kind of mean) to create the memory_agent list.
"""
for (agent_id, target_id) in self.memory_all_agent_from_target.agents_and_items_discovered:
if agent_id == self.id:
for estimateur in self.memory_all_agent_from_target.get_agent_item_list(target_id, self.id):
if not is_in_list_TargetEstimator(self.memory_measured_from_target.get_item_list(target_id),
estimateur):
self.log_memory.info(
"Combine data from agent : " + str(agent_id) + " - target " + str(target_id))
self.memory_measured_from_target.add_itemEstimation(estimateur)
"Combine data related to agentCam"
if True:
for (agent_id, agent_observed_id) in self.memory_all_agent_from_agent.agents_and_items_discovered:
if agent_id == agent_observed_id:
for estimateur in self.memory_all_agent_from_agent.get_agent_item_list(agent_id, agent_id):
self.log_memory.info(
"Combine data from agent : " + str(agent_id) + " - agent " + str(agent_observed_id))
if not is_in_list_TargetEstimator(self.memory_agent_from_agent.get_item_list(agent_id),
estimateur):
self.log_memory.info(
"Combine data from agent : " + str(agent_id) + " - target " + str(agent_id))
self.memory_agent_from_agent.add_itemEstimation(estimateur)
def combine_data_userCam(self, choice=1):
if choice == 1:
for (agent_id, target_id) in self.memory_all_agent_from_target.agents_and_items_discovered:
for estimateur in self.memory_all_agent_from_target.get_agent_item_list(target_id, agent_id):
if not is_in_list_TargetEstimator(self.memory_measured_from_target.get_item_list(target_id),
estimateur):
self.log_memory.info(
"Combine data, from agent : " + str(agent_id) + " - target " + str(target_id))
self.memory_measured_from_target.add_itemEstimation(estimateur)
'''
best_estimator = (-1,1000000000,10000000)
target_id_list = []
for (agent_id, target_id) in self.memory_all_agent_from_target.Agent_item_already_discovered_list:
if target_id not in target_id_list:
target_id_list.append(target_id)
for (agent_id_to_check, target_id_to_check) in self.memory_all_agent_from_target.Agent_item_already_discovered_list:
if target_id == target_id_to_check:
estimator = self.memory_all_agent_from_target.get_Agent_item_list(target_id, agent_id)[-1]
norm_variance = np.sqrt(np.square(estimator.variance_on_estimation[0]) + np.square(estimator.variance_on_estimation[1]))
delta_t = constants.get_time() - estimator.time_stamp
if (not isinstance(best_estimator[0], TargetEstimation) or norm_variance < best_estimator[1]) and delta_t < best_estimator[2]:
best_estimator = (estimator,norm_variance,delta_t)
if isinstance(best_estimator[0], TargetEstimation) and not is_in_list_TargetEstimator(self.memory_measured_from_target.get_item_list(target_id), best_estimator[0]):
#self.log_memory.info("Combine data, from agent : " + str(agent_id) + " - target " + str(target_id))
self.memory_measured_from_target.add_itemEstimation(best_estimator[0])
'''
def get_previous_positions(self, targetID):
return self.memory_measured_from_target.get_item_list(targetID)
def getPreviousPositions_allMessages(self, targetID, agentID):
return self.memory_all_agent_from_target.get_agent_item_list(targetID, agentID)
def to_string_memory_all(self):
return self.memory_all_agent_from_target.to_string() + "\n" + self.memory_all_agent_from_agent.to_string()
def statistic_to_string(self):
return self.memory_all_agent_from_target.statistic_to_string()
def get_predictions(self, seeked_target_id):
"""
:return: a list [[targetId, [predicted_position1, ...]], ...]
"""
predictions = []
for target_id in seeked_target_id:
target_prediction = self.get_target_predictions(target_id)
if target_prediction is not None:
predictions.append([target_id, target_prediction])
"""
list_all_prediction = [list_predtion_tplus1 = Target_TargetEstimator(), list_predtion_t2 = Target_TargetEstimator()]
"""
return predictions
def get_target_predictions(self, seeked_target_id):
"""
:description:
Method used for the predictions of future positions.
:return the predicted positions for targetId """
predictor = self.get_target_predictor(seeked_target_id)
if predictor is None:
return []
return predictor.get_predictions()
def innovation_smaller_than_bound(self, seeked_target_id):
predictor = self.get_target_predictor(seeked_target_id)
if predictor is None:
return True
return predictor.innovation_smaller_than_bound()
def get_DKF_info_string(self, seeked_target_id):
"""
:description:
Method used for the communication of DKF messages. When an agent needs to send the DKF_info to the other
            agents, it gets it through this method.
:return the state/variance error info needed for the DKF
"""
predictor = self.get_target_predictor(seeked_target_id)
if predictor is None:
return []
return predictor.get_DKF_info_string()
def process_DKF_info(self, seeked_target_id, dfk_info_string, timestamp):
"""
:description:
            Method used for the communication of DKF messages. When an agent receives a DKF_info message, it calls
this method to transmit this information to the DKF associated with the target.
"""
predictor = self.get_target_predictor(seeked_target_id)
if predictor is not None:
before = self.memory_best_estimations_from_target.get_item_list(seeked_target_id)[-1].item.xc
predictor.assimilate(dfk_info_string, timestamp)
(x, var) = predictor.get_current_position()
self.memory_best_estimations_from_target.update_last_itemEstimation(x[0], x[1], x[2], x[3], seeked_target_id)
after = self.memory_best_estimations_from_target.get_item_list(seeked_target_id)[-1].item.xc
if self.memory_local_kf.get_item_list(0):
self.log_all_kf_actions.info("ASSIMILATE")
self.log_all_kf_actions.info("local: {}".format(self.memory_local_kf.get_item_list(0)[-1].item.xc))
self.log_all_kf_actions.info("global: {}".format(self.memory_best_estimations_from_target.get_item_list(0)[-1].item.xc))
self.log_all_kf_actions.info("before: {}, after: {}".format(before, after))
def get_target_predictor(self, seeked_target_id):
""" :return the Kalman Predictor associated with this target """
for predictor in self.predictors:
if predictor.target_id == seeked_target_id:
return predictor
return None
def exists_predictor_for_target(self, seeked_target_id):
""" Checks if a predictor for the given target exists """
for predictor in self.predictors:
if predictor.target_id == seeked_target_id:
return True
return False
def create_predictor_for_target(self, agent_id, target_id, x_init, y_init, vx_init, vy_init, ax_init, ay_init,
timestamp):
""" Creates an entry in self.predictors for this target """
predictor = KalmanPrediction(agent_id, target_id, x_init, y_init, vx_init, vy_init, ax_init, ay_init, timestamp)
self.predictors.append(predictor)
def update_best_estimation(self, time_from_estimation, agent_id, agent_signature, item):
"""
:description
Updates the estimation list for each target
:param
"""
self.memory_best_estimations_from_target.add_create_itemEstimation(time_from_estimation, agent_id,
agent_signature, copy.copy(item))
self.memory_local_kf.add_create_itemEstimation(time_from_estimation, agent_id,
agent_signature, copy.copy(item))
"""
if len(self.memory_local_kf.get_item_list(0)) > 1:
self.log_all_kf_actions.info("UPDATE")
self.log_all_kf_actions.info("local: {}".format(self.memory_local_kf.get_item_list(0)[-1].item.xc))
self.log_all_kf_actions.info("global: {}".format(self.memory_best_estimations_from_target.get_item_list(0)[-1].item.xc))
self.log_memory_local_kf.info(self.memory_local_kf.get_item_list(0)[-2].item.xc)
self.log_memory_global_kf.info(self.memory_best_estimations_from_target.get_item_list(0)[-2].item.xc)
"""
def get_noiseless_estimations(self, seeked_target_id):
"""
:return:
if not found -- []
else -- TargetEstimator list
"""
return self.memory_best_estimations_from_target.get_item_list(seeked_target_id)
def get_local_kf(self, seeked_target_id):
return self.memory_local_kf.get_item_list(seeked_target_id)
def update_predictions_lists(self, time_from_estimation, agent_id, agent_signature, item):
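        # Fetches the order-1 and order-2 predictions for this target and, if available,
        # records an estimation in the corresponding prediction memories.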
# predictions_for_target = self.get_target_predictor(target_id).get_predictions()
predictions_for_target = self.get_target_predictions(item.id)
if len(predictions_for_target) < 2: # No predictions made
return
predictions_order_1 = predictions_for_target[0]
predictions_order_2 = predictions_for_target[1]
prediction_1, variance1 = predictions_order_1
prediction_2, variance2 = predictions_order_2
predict_vx = 0
predict_vy = 0
predict_ax = 0
predict_ay = 0
self.memory_predictions_order_1_from_target.add_create_itemEstimation(time_from_estimation, agent_id,
agent_signature, item)
self.memory_predictions_order_2_from_target.add_create_itemEstimation(time_from_estimation, agent_id,
agent_signature,item)
def compute_obstruction_time(self,filename1,filename2,room):
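        # Estimates, per target, how long it went unobserved between t=10 and t=70:
        # gaps between consecutive measurements are binned by multiples of TIME_BTW_TARGET_ESTIMATOR.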
list = self.memory_measured_from_target
for target in room.information_simulation.target_list:
obstructed_time = [0,0,0,0,0]
item_list = list.get_item_list(target.id)
item_list.sort()
for item1,item2 in zip(item_list[:-1],item_list[1:]):
delta_t = item2.time_stamp - item1.time_stamp
for n,time in enumerate(obstructed_time):
if delta_t < (n+1)*constants.TIME_BTW_TARGET_ESTIMATOR and 10 < item1.time_stamp < 70 :
#print("item1: %.02f item2: %.02f" % (item1.time_stamp, item2.time_stamp))
#print(delta_t)
obstructed_time[n] += delta_t
break
elif delta_t > 5*constants.TIME_BTW_TARGET_ESTIMATOR and 10 < item1.time_stamp < 70 :
obstructed_time[-1] += delta_t
break
#fichier = open(filename1, "a")
#fichier.write("%s \n" % obstructed_time)
#fichier.close()
#fichier = open(constants.MapPath.MAIN_FOLDER +filename2, "a")
#fichier.write("%s \n"%obstructed_time)
#fichier.close()
| 51.736702
| 184
| 0.637125
|
5d42d32e77a7f5062105f83dd08ba2fbc9f08daf
| 3,020
|
py
|
Python
|
igibson/robots/jr2_kinova_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 360
|
2020-04-02T11:12:09.000Z
|
2022-03-24T21:46:58.000Z
|
igibson/robots/jr2_kinova_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 169
|
2020-04-07T21:01:05.000Z
|
2022-03-31T10:07:39.000Z
|
igibson/robots/jr2_kinova_robot.py
|
suresh-guttikonda/iGibson
|
a69e623058180146466cd52d4bb3c00d1facdacf
|
[
"MIT"
] | 94
|
2020-04-09T23:22:17.000Z
|
2022-03-17T21:49:03.000Z
|
import gym
import numpy as np
import pybullet as p
from igibson.external.pybullet_tools.utils import joints_from_names
from igibson.robots.robot_locomotor import LocomotorRobot
class JR2_Kinova(LocomotorRobot):
"""
JR2 Kinova robot
Reference: https://cvgl.stanford.edu/projects/jackrabbot/
Uses joint velocity control
"""
def __init__(self, config):
self.config = config
self.wheel_velocity = config.get("wheel_velocity", 0.3)
self.wheel_dim = 2
self.arm_velocity = config.get("arm_velocity", 1.0)
self.arm_dim = 5
LocomotorRobot.__init__(
self,
"jr2_urdf/jr2_kinova.urdf",
action_dim=self.wheel_dim + self.arm_dim,
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", False),
control="velocity",
self_collision=True,
)
def set_up_continuous_action_space(self):
"""
Set up continuous action space
"""
self.action_high = np.array([self.wheel_velocity] * self.wheel_dim + [self.arm_velocity] * self.arm_dim)
self.action_low = -self.action_high
self.action_space = gym.spaces.Box(shape=(self.wheel_dim + self.arm_dim,), low=-1.0, high=1.0, dtype=np.float32)
def set_up_discrete_action_space(self):
"""
Set up discrete action space
"""
assert False, "JR2_Kinova does not support discrete actions"
def get_end_effector_position(self):
"""
Get end-effector position
"""
return self.parts["m1n6s200_end_effector"].get_position()
def robot_specific_reset(self):
"""
JR2 Kinova robot specific reset.
Initialize JR's arm to about the same height at its neck, facing forward
"""
super(JR2_Kinova, self).robot_specific_reset()
self.ordered_joints[2].reset_joint_state(-np.pi / 2.0, 0.0)
self.ordered_joints[3].reset_joint_state(np.pi / 2.0, 0.0)
self.ordered_joints[4].reset_joint_state(np.pi / 2.0, 0.0)
self.ordered_joints[5].reset_joint_state(np.pi / 2.0, 0.0)
self.ordered_joints[6].reset_joint_state(0.0, 0.0)
def load(self):
"""
Load the robot into pybullet. Filter out unnecessary self collision
due to modeling imperfection in the URDF
"""
ids = super(JR2_Kinova, self).load()
robot_id = self.robot_ids[0]
disable_collision_names = [
["base_chassis_joint", "pan_joint"],
["base_chassis_joint", "tilt_joint"],
["base_chassis_joint", "camera_joint"],
["jr2_fixed_body_joint", "pan_joint"],
["jr2_fixed_body_joint", "tilt_joint"],
["jr2_fixed_body_joint", "camera_joint"],
]
for names in disable_collision_names:
link_a, link_b = joints_from_names(robot_id, names)
p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)
return ids
| 35.116279
| 120
| 0.631788
|
e3d527bd01bc9f4bd1545a978d582bb40247ed8d
| 2,743
|
py
|
Python
|
integration/apps/hpl_mkl/hpl_mkl.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | 77
|
2015-10-16T22:20:51.000Z
|
2022-03-30T22:51:12.000Z
|
integration/apps/hpl_mkl/hpl_mkl.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | 1,627
|
2016-05-17T18:25:53.000Z
|
2022-03-31T22:49:45.000Z
|
integration/apps/hpl_mkl/hpl_mkl.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | 44
|
2015-10-28T15:59:44.000Z
|
2022-03-25T20:28:18.000Z
|
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
AppConf class for HPL MKL benchmark.
'''
import os
import textwrap
from apps.hpl_netlib import hpl_netlib
class HplMklAppConf(hpl_netlib.HplNetlibAppConf):
@staticmethod
def name():
return 'hpl_mkl'
def __init__(self, num_nodes, mach, frac_dram_per_node, cores_per_node=None):
super(HplMklAppConf, self).__init__(num_nodes, mach, frac_dram_per_node, cores_per_node)
self.mklroot = os.getenv('MKLROOT')
self.exec_path = os.path.join(self.mklroot, 'benchmarks/mp_linpack/xhpl_intel64_dynamic')
def get_bash_setup_commands(self):
benchmark_dir = os.path.dirname(os.path.abspath(__file__))
setup_commands = '{}\n'.format(os.path.join(benchmark_dir, 'check_env.sh'))
setup_commands += 'export MKL_NUM_THREADS={}\n'.format(self._cpu_per_rank)
setup_commands += textwrap.dedent('''
# For Mvapich
if [ -n "${MPIRUN_RANK}" ]; then
PMI_RANK=${MPIRUN_RANK}
fi
# For OpenMPI
if [ -n "${OMPI_COMM_WORLD_RANK}" ]; then
PMI_RANK=${OMPI_COMM_WORLD_RANK}
fi
''')
return setup_commands
| 39.753623
| 97
| 0.712359
|
5f1ffa81eab17b720f9f02a9d55a8720d64aa27d
| 40,222
|
py
|
Python
|
python/paddle/framework/io.py
|
silentstorm0531/Paddle
|
3c49f08ea3a0e45fed2538aa1e58c133572f1883
|
[
"Apache-2.0"
] | 1
|
2021-06-10T04:35:57.000Z
|
2021-06-10T04:35:57.000Z
|
python/paddle/framework/io.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/framework/io.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import collections
import pickle
import six
import warnings
import sys
import numpy as np
if not six.PY2:
import copyreg
import paddle
# deprecated module import
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.io import _unpack_saved_dict, _pack_loaded_dict, _pickle_loads_mac
from paddle.fluid.io import _legacy_save as _legacy_static_save
from paddle.fluid.io import _open_file_buffer, _is_file_path, _is_memory_buffer
from paddle.fluid.framework import Variable, _varbase_creator, _dygraph_tracer, in_dygraph_mode, ParamBase, _current_expected_place, Program
from paddle.fluid.dygraph.jit import _SaveLoadConfig
from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX
__all__ = []
def _build_saved_state_dict(state_dict):
save_dict = {}
name_table = {}
for key, value in state_dict.items():
if isinstance(value, (Variable, core.VarBase)):
save_dict[key] = value.numpy()
name_table[key] = value.name
else:
save_dict[key] = value
save_dict["StructuredToParameterName@@"] = name_table
return save_dict
def _load_state_dict_from_save_inference_model(model_path, config):
# 1. load program desc & construct _ProgramHolder
programs = _construct_program_holders(model_path, config.model_filename)
# 2. load layer parameters & buffers
with fluid.dygraph.guard():
persistable_var_dict = _construct_params_and_buffers(
model_path, programs, config.params_filename, append_suffix=False)
# 3. construct state_dict
load_param_dict = dict()
for var_name in persistable_var_dict:
load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
# if *.info exists, we can recover structured_name
var_info_filename = str(config.params_filename) + ".info"
var_info_path = os.path.join(model_path, var_info_filename)
if os.path.exists(var_info_path):
with open(var_info_path, 'rb') as f:
extra_var_info = pickle.load(f)
structured_para_dict = dict()
for var_name in load_param_dict:
structured_name = extra_var_info[var_name].get(
'structured_name', None)
assert structured_name is not None, "Cannot find saved variable (%s)'s structured name in saved model." % var_name
structured_para_dict[structured_name] = load_param_dict[
var_name]
load_param_dict = structured_para_dict
return load_param_dict
def _load_state_dict_from_save_params(model_path):
# Try to load all the files in the directory in VarBase format,
# the file name is used as the name of VarBase
load_var_list = []
# 1. load file names
var_name_list = []
for root, _, files in os.walk(model_path):
for filename in files:
file_path = os.path.join(root, filename)
tmp_var_name = os.path.relpath(file_path, model_path)
var_name = tmp_var_name.replace("\\", "/")
var_name_list.append(var_name)
# 2. create and load VarBase
with fluid.dygraph.guard():
for name in var_name_list:
new_var = _varbase_creator(name=name, persistable=True)
_dygraph_tracer().trace_op(
type='load',
inputs={},
outputs={'Out': new_var},
attrs={'file_path': os.path.join(model_path, name)})
load_var_list.append(new_var)
# 3. construct state_dict
load_param_dict = dict()
for var in load_var_list:
load_param_dict[var.name] = var.numpy()
return load_param_dict
# NOTE(chenweihang): [ Handling of use cases of API paddle.load ]
# `paddle.load` may be used to load saved results of:
# 1. Expected cases:
# - need [full filename] when loading
# - paddle.save
# - paddle.static.save
# - paddle.fluid.save_dygraph
# - need [prefix] when loading [compatible for paddle 2.x]
# - paddle.jit.save
# - paddle.static.save_inference_model
# - need [directory] when loading [compatible for paddle 1.x]
# - paddle.fluid.io.save_inference_model
# - paddle.fluid.io.save_params/save_persistables
# 2. Error cases:
# - no error case
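# Illustrative calls for the cases above (added comments; the file and directory names are
# hypothetical):
#   paddle.load('model.pdparams')   # full filename saved by paddle.save / paddle.static.save
#   paddle.load('inference/mnist')  # prefix saved by paddle.jit.save (mnist.pdmodel + mnist.pdiparams)
#   paddle.load('saved_model_dir')  # directory saved by paddle 1.x save_inference_model / save_params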
def _build_load_path_and_config(path, config):
# NOTE(chenweihang): If both [prefix save format] and [directory save format] exist,
# raise error, avoid confusing behavior
prefix_format_path = path + INFER_MODEL_SUFFIX
prefix_format_exist = os.path.exists(prefix_format_path)
directory_format_exist = os.path.isdir(path)
if prefix_format_exist and directory_format_exist:
raise ValueError(
"The %s.pdmodel and %s directory exist at the same time, "
"don't know which one to load, please make sure that the specified target "
"of ``path`` is unique." % (path, path))
elif not prefix_format_exist and not directory_format_exist:
error_msg = "The ``path`` (%s) to load model not exists."
# if current path is a prefix, and the path.pdparams or path.pdopt
# exists, users may want to use `paddle.load` to load the result of
# `fluid.save_dygraph`, we raise error here for users
params_file_path = path + ".pdparams"
opti_file_path = path + ".pdopt"
if os.path.exists(params_file_path) or os.path.exists(opti_file_path):
error_msg += " If you want to load the results saved by `fluid.save_dygraph`, " \
"please specify the full file name, not just the file name prefix. For " \
"example, it should be written as `paddle.load('model.pdparams')` instead of " \
"`paddle.load('model')`."
raise ValueError(error_msg % path)
else:
if prefix_format_exist:
file_prefix = os.path.basename(path)
model_path = os.path.dirname(path)
if config.model_filename is not None:
warnings.warn(
"When loading the result saved with the "
"specified file prefix, the ``model_filename`` config does "
"not take effect.")
config.model_filename = file_prefix + INFER_MODEL_SUFFIX
if config.params_filename is not None:
warnings.warn(
"When loading the result saved with the "
"specified file prefix, the ``params_filename`` config does "
"not take effect.")
config.params_filename = file_prefix + INFER_PARAMS_SUFFIX
else:
# Compatible with the old save_inference_model format
model_path = path
return model_path, config
def _parse_load_config(configs):
supported_configs = [
'model_filename', 'params_filename', 'keep_name_table', 'return_numpy'
]
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.load` is not supported."
% key)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
inner_config.keep_name_table = configs.get('keep_name_table', None)
inner_config.return_numpy = configs.get('return_numpy', False)
return inner_config
def _parse_save_config(configs):
supported_configs = ['use_binary_format', 'pickle_protocol']
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.save` is not supported."
% key)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.use_binary_format = configs.get('use_binary_format', False)
inner_config.pickle_protocol = configs.get('pickle_protocol', None)
return inner_config
def _pickle_save(obj, f, protocol):
# TODO(weixin):add support for BytesIO.
if not isinstance(protocol, int):
raise ValueError("The 'protocol' MUST be `int`, but received {}".format(
type(protocol)))
if protocol < 2 or protocol > 4:
raise ValueError("Expected 1<'protocol'<5, but received protocol={}".
format(protocol))
def reudce_varbase(self):
data = self.numpy()
name = self.name
return (tuple, ((name, data), ))
def reduce_LoDTensor(self):
data = np.array(self)
return (eval, ('data', {'data': data}))
def add_dispatch_table():
# This is not ideal, because it mutates the global pickle dispatch table.
pickle.dispatch_table[core.VarBase] = reudce_varbase
pickle.dispatch_table[ParamBase] = reudce_varbase
pickle.dispatch_table[core.LoDTensor] = reduce_LoDTensor
def pop_dispatch_table():
pickle.dispatch_table.pop(core.VarBase)
pickle.dispatch_table.pop(core.LoDTensor)
pickle.dispatch_table.pop(ParamBase)
# When the value of a dict is larger than 4GB, there is a bug on macOS Python 3
if sys.platform == 'darwin' and sys.version_info.major == 3:
add_dispatch_table()
pickle_bytes = pickle.dumps(obj)
pop_dispatch_table()
max_bytes = 2**30
for i in range(0, len(pickle_bytes), max_bytes):
f.write(pickle_bytes[i:i + max_bytes])
else:
if six.PY2:
add_dispatch_table()
pickle_bytes = pickle.dump(obj, f, protocol)
pop_dispatch_table()
else:
pickler = pickle.Pickler(f, protocol)
pickler.dispatch_table = copyreg.dispatch_table.copy()
pickler.dispatch_table[core.VarBase] = reudce_varbase
pickler.dispatch_table[core.LoDTensor] = reduce_LoDTensor
pickler.dispatch_table[ParamBase] = reudce_varbase
pickler.dump(obj)
def _contain_x(obj, condition_func):
if isinstance(obj, core.SelectedRows):
raise NotImplementedError(
"`paddle.save` do not support saving 'SelectedRows'.")
if condition_func(obj):
return True
elif type(obj) in (dict, collections.OrderedDict, list, tuple):
if type(obj) in (dict, collections.OrderedDict):
keys = list(obj.keys())
else:
keys = range(len(obj))
flag = False
for key in keys:
flag |= _contain_x(obj[key], condition_func)
if flag:
return True
return flag
else:
return False
def _is_state_dict(obj):
if isinstance(obj, dict):
def condition(obj):
return isinstance(obj, (core.Layer, Program, core.VarBase,
core.LoDTensor, core.SelectedRows))
# If the value of a dict is a core.VarBase/LoDTensor or a dict
# that does not contain a paddle type (Layer, Program, VarBase, LoDTensor, SelectedRows),
# the dict is considered to be a state_dict.
for key, value in obj.items():
if isinstance(value, dict):
for k, v in value.items():
if _contain_x(v, condition):
return False
elif not isinstance(value, (core.VarBase, core.LoDTensor)):
return False
return True
return False
def _transformed_from_varbase(obj):
# In paddle2.1 version, VarBase is saved as tuple(tensor.name, tensor.numpy()).
# When executing paddle.load, use this function to determine whether to restore to VarBase/LoDTensor.
if isinstance(obj, tuple) and len(obj) == 2:
if six.PY2:
name_types = (str, unicode)
else:
name_types = str
if isinstance(obj[0], name_types) and isinstance(obj[1], np.ndarray):
return True
return False
def _transformed_from_lodtensor(obj):
# In paddle2.1 version, LoDTensor is saved as np.array(tensor).
# When executing paddle.load, use this function to determine whether to restore to VarBase/LoDTensor.
if isinstance(obj, np.ndarray):
return True
return False
def _to_LodTensor(ndarray):
if not isinstance(ndarray, np.ndarray):
raise TypeError(
'Type of `ndarray` should be numpy.ndarray, but received {}.'.
format(type(ndarray)))
t = core.LoDTensor()
place = _current_expected_place()
t.set(ndarray, place)
return t
def _tuple_to_tensor(obj, return_numpy):
if return_numpy:
return obj[1]
if in_dygraph_mode():
t = paddle.to_tensor(obj[1])
# This function modifies the name of the returned tensor.
# Loading the same variable multiple times may otherwise result in duplicate names.
t.name = obj[0]
return t
else:
return _to_LodTensor(obj[1])
def _ndarray_to_tensor(obj, return_numpy):
if return_numpy:
return obj
if in_dygraph_mode():
return paddle.to_tensor(obj)
else:
return _to_LodTensor(obj)
def _lod_tensor2varbase(tensor):
return_var = _varbase_creator()
return_var.value().get_tensor().set(tensor, _current_expected_place())
return return_var
def _parse_every_object(obj, condition_func, convert_func):
if condition_func(obj):
return convert_func(obj)
elif type(obj) in (dict, collections.OrderedDict, list):
if type(obj) == list:
keys = range(len(obj))
else:
keys = list(obj.keys())
for key in keys:
if condition_func(obj[key]):
obj[key] = convert_func(obj[key])
else:
obj[key] = _parse_every_object(obj[key], condition_func,
convert_func)
return obj
elif type(obj) == tuple:
return tuple(
_parse_every_object(list(obj), condition_func, convert_func))
elif type(obj) == set:
return set(_parse_every_object(list(obj), condition_func, convert_func))
else:
if isinstance(obj, collections.Iterable) and not isinstance(obj, (
str, np.ndarray, core.VarBase, core.LoDTensor)):
raise NotImplementedError(
"The iteratable objects supported are tuple, list, dict, OrderedDict, string. But received {}.".
format(type(obj)))
return obj
def _parse_load_result(obj, return_numpy):
def is_layer(obj):
return isinstance(obj, core.Layer)
def parse_layer(obj):
temp_dict = _parse_load_result(obj.__dict__, False)
obj.__dict__.update(temp_dict)
return obj
if _contain_x(obj, is_layer):
if not in_dygraph_mode():
raise ValueError(
"Layer can only be loaded in dynamic graph mode, but now in static graph mode."
)
_parse_every_object(obj, is_layer, parse_layer)
def tuple_to_tensor(obj):
return _tuple_to_tensor(obj, return_numpy=return_numpy)
def ndarray_to_tensor(obj):
return _ndarray_to_tensor(obj, return_numpy=return_numpy)
# tuple(name, ndarray) was converted from varbase of paddle2.1,
# and all tuple(name, ndarray) are converted to tensor.
if _contain_x(obj, _transformed_from_varbase):
return _parse_every_object(obj, _transformed_from_varbase,
tuple_to_tensor)
# If there is no tuple(name, ndarray), it is considered to be saved by paddle2.0
# or converted from LoDTensor, and all ndarrays are converted to tensor.
else:
return _parse_every_object(obj, _transformed_from_lodtensor,
ndarray_to_tensor)
def _save_lod_tensor(tensor, file_name):
if not tensor._is_initialized():
raise ValueError("The saved tensor is not initialized.")
if _is_file_path(file_name):
_seek = core.save_lod_tensor(tensor, file_name)
# '_seek' is the end position of this tensor in the file.
elif _is_memory_buffer(file_name):
tensor_bytes = core.save_lod_tensor_to_memory(tensor)
with _open_file_buffer(file_name, 'wb') as f:
f.write(tensor_bytes)
_seek = f.tell()
else:
raise NotImplementedError(
'Only supports saving objects to file or BytesIO, but received {}'.
format(type(file_name)))
return _seek
def _load_lod_tensor(file_name):
temp_t = paddle.fluid.core.LoDTensor()
if _is_file_path(file_name):
# '_seek' is the end position of this tensor in the file.
_seek = paddle.fluid.core.load_lod_tensor(temp_t, file_name)
elif _is_memory_buffer(file_name):
with _open_file_buffer(file_name, 'rb') as f:
tensor_bytes = f.read()
paddle.fluid.core.load_lod_tensor_from_memory(temp_t, tensor_bytes)
_seek = f.tell()
else:
raise NotImplementedError(
'Only supports load objects from file or BytesIO, but received {}'.
format(type(file_name)))
return temp_t, _seek
def _save_selected_rows(selected_rows, file_name):
if not selected_rows.get_tensor()._is_initialized():
raise ValueError("The saved tensor is not initialized.")
if _is_file_path(file_name):
# '_seek' is the end position of this SelectedRows in the file.
_seek = core.save_selected_rows(selected_rows, file_name)
elif _is_memory_buffer(file_name):
selected_rows_bytes = core.save_selected_rows_to_memory(selected_rows)
with _open_file_buffer(file_name, 'wb') as f:
f.write(selected_rows_bytes)
_seek = f.tell()
else:
raise NotImplementedError(
'Only supports saving objects to file or BytesIO, but received {}'.
format(type(file_name)))
return _seek
def _load_selected_rows(file_name):
temp_sr = core.SelectedRows()
if _is_file_path(file_name):
# '_seek' is the end position of this SelectedRows in the file.
_seek = core.load_selected_rows(temp_sr, file_name)
elif _is_memory_buffer(file_name):
with _open_file_buffer(file_name, 'rb') as f:
selected_rows_bytes = f.read()
paddle.fluid.core.load_selected_rows_from_memory(
temp_sr, selected_rows_bytes)
_seek = f.tell()
else:
raise NotImplementedError(
'Only supports load objects from file or BytesIO, but received {}'.
format(type(file_name)))
return temp_sr, _seek
def _save_binary_var(obj, path):
if isinstance(obj, core.LoDTensor):
_save_lod_tensor(obj, path)
elif isinstance(obj, core.SelectedRows):
_save_selected_rows(obj, path)
elif isinstance(obj, core.VarBase):
_save_lod_tensor(obj.value().get_tensor(), path)
else:
# Since the concept of 'Tensor' is only exposed to users, the error message can only contain tensor instead of 'LoDTensor' or 'SelectedRows'
raise NotImplementedError(
"When use_binary_format = True, `paddle.save` expected Tensor, but received {}.".
format(type(obj)))
def save(obj, path, protocol=4, **configs):
'''
Save an object to the specified path.
.. note::
Now supports saving ``state_dict`` of Layer/Optimizer, Layer, Tensor and nested structure containing Tensor, Program.
.. note::
Different from ``paddle.jit.save``, since the save result of ``paddle.save`` is a single file,
there is no need to distinguish multiple saved files by adding a suffix. The argument ``path``
of ``paddle.save`` will be directly used as the saved file name instead of a prefix.
In order to unify the saved file name format, we recommend using the paddle standard suffix:
1. for ``Layer.state_dict`` , recommend to use ``.pdparams`` ;
2. for ``Optimizer.state_dict`` , recommend to use ``.pdopt`` .
For specific examples, please refer to API code examples.
Args:
obj(Object) : The object to be saved.
path(str|BytesIO) : The path/buffer of the object to be saved.
If saved in the current directory, the input path string will be used as the file name.
protocol(int, optional): The protocol version of pickle module must be greater than 1 and less than 5.
Default: 4
**configs(dict, optional): optional keyword arguments. The following options are currently supported:
use_binary_format(bool): When the saved object is a static graph variable, you can specify ``use_binary_format``.
If True, save the file in the c++ binary format when saving a single static graph variable; otherwise, save it in pickle format.
Default: False
Returns:
None
Examples:
.. code-block:: python
# example 1: dynamic graph
import paddle
emb = paddle.nn.Embedding(10, 10)
layer_state_dict = emb.state_dict()
# save state_dict of emb
paddle.save(layer_state_dict, "emb.pdparams")
scheduler = paddle.optimizer.lr.NoamDecay(
d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
learning_rate=scheduler,
parameters=emb.parameters())
opt_state_dict = adam.state_dict()
# save state_dict of optimizer
paddle.save(opt_state_dict, "adam.pdopt")
# save weight of emb
paddle.save(emb.weight, "emb.weight.pdtensor")
# example 2: Save multiple state_dict at the same time
from paddle import nn
from paddle.optimizer import Adam
layer = paddle.nn.Linear(3, 4)
adam = Adam(learning_rate=0.001, parameters=layer.parameters())
obj = {'model': layer.state_dict(), 'opt': adam.state_dict(), 'epoch': 100}
path = 'example/model.pdparams'
paddle.save(obj, path)
# example 3: static graph
import paddle
import paddle.static as static
paddle.enable_static()
# create network
x = paddle.static.data(name="x", shape=[None, 224], dtype='float32')
z = paddle.static.nn.fc(x, 10)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
prog = paddle.static.default_main_program()
for var in prog.list_vars():
if list(var.shape) == [224, 10]:
tensor = var.get_value()
break
# save/load tensor
path_tensor = 'temp/tensor.pdtensor'
paddle.save(tensor, path_tensor)
# save/load state_dict
path_state_dict = 'temp/model.pdparams'
paddle.save(prog.state_dict("param"), path_tensor)
# example 4: save program
import paddle
paddle.enable_static()
data = paddle.static.data(
name='x_static_save', shape=(None, 224), dtype='float32')
y_static = z = paddle.static.nn.fc(data, 10)
main_program = paddle.static.default_main_program()
path = "example/main_program.pdmodel"
paddle.save(main_program, path)
# example 5: save object to memory
from io import BytesIO
import paddle
from paddle.nn import Linear
paddle.disable_static()
linear = Linear(5, 10)
state_dict = linear.state_dict()
byio = BytesIO()
paddle.save(state_dict, byio)
tensor = paddle.randn([2, 3], dtype='float32')
paddle.save(tensor, byio)
'''
if _is_file_path(path):
# 1. input check
filename = os.path.basename(path)
if filename == "":
raise ValueError(
"The input path MUST be format of dirname/filename "
"[dirname\\filename in Windows system], but received "
"filename is empty string.")
# 2. save object
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
elif not _is_memory_buffer(path):
raise ValueError(
"only supports saving objects to file and `BytesIO`, but got {}".
format(type(path)))
config = _parse_save_config(configs)
if not isinstance(config.use_binary_format, bool):
raise TypeError(
"Type of `use_binary_format` should be bool, but received {}.".
format(type(config.use_binary_format)))
if config.use_binary_format:
_save_binary_var(obj, path)
else:
# `protocol` should be used; `pickle_protocol` is a deprecated argument.
if config.pickle_protocol is not None:
protocol = config.pickle_protocol
warnings.warn(
"'pickle_protocol' is a deprecated argument. Please use 'protocol' instead."
)
if isinstance(obj, Program):
obj.desc.flush()
with _open_file_buffer(path, "wb") as f:
f.write(obj.desc.serialize_to_string())
elif _is_state_dict(obj):
if in_dygraph_mode():
_legacy_save(obj, path, protocol)
else:
_legacy_static_save(obj, path, protocol)
else:
with _open_file_buffer(path, 'wb') as f:
_pickle_save(obj, f, protocol)
def _legacy_save(obj, path, protocol=2):
# 1. input check
if not isinstance(obj, dict):
raise NotImplementedError(
"Now only supports save state_dict of Layer or Optimizer, "
"expect dict, but received %s." % type(obj))
if len(obj) == 0:
warnings.warn("The input state dict is empty, no need to save.")
if not isinstance(protocol, int):
raise ValueError("The 'protocol' MUST be `int`, but received {}".format(
type(protocol)))
if protocol < 2 or protocol > 4:
raise ValueError("Expected 1<'protocol'<5, but received protocol={}".
format(protocol))
if _is_file_path(path):
filename = os.path.basename(path)
if filename == "":
raise ValueError(
"The input path MUST be format of dirname/filename "
"[dirname\\filename in Windows system], but received "
"filename is empty string.")
# 2. save object
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if isinstance(obj, dict):
saved_obj = _build_saved_state_dict(obj)
saved_obj = _unpack_saved_dict(saved_obj, protocol)
# When the value of a dict is larger than 4GB, there is a bug on macOS Python 3
if _is_file_path(
path) and sys.platform == 'darwin' and sys.version_info.major == 3:
pickle_bytes = pickle.dumps(saved_obj, protocol=protocol)
with open(path, 'wb') as f:
max_bytes = 2**30
for i in range(0, len(pickle_bytes), max_bytes):
f.write(pickle_bytes[i:i + max_bytes])
else:
with _open_file_buffer(path, 'wb') as f:
pickle.dump(saved_obj, f, protocol=protocol)
def load(path, **configs):
'''
Load an object that can be used in paddle from the specified path.
.. note::
Now supports loading ``state_dict`` of Layer/Optimizer, Layer, Tensor and nested structure containing Tensor, Program.
.. note::
In order to use the model parameters saved by paddle more efficiently,
``paddle.load`` supports loading ``state_dict`` of Layer from the result of
other save APIs except ``paddle.save`` , but the argument ``path`` format is
different:
1. loading from ``paddle.static.save`` or ``paddle.Model().save(training=True)`` ,
``path`` needs to be a complete file name, such as ``model.pdparams`` or
``model.pdopt`` ;
2. loading from ``paddle.jit.save`` or ``paddle.static.save_inference_model``
or ``paddle.Model().save(training=False)`` , ``path`` need to be a file prefix,
such as ``model/mnist``, and ``paddle.load`` will get information from
``mnist.pdmodel`` and ``mnist.pdiparams`` ;
3. loading from paddle 1.x APIs ``paddle.fluid.io.save_inference_model`` or
``paddle.fluid.io.save_params/save_persistables`` , ``path`` need to be a
directory, such as ``model`` and model is a directory.
.. note::
If you load ``state_dict`` from the saved result of static mode API such as
``paddle.static.save`` or ``paddle.static.save_inference_model`` ,
the structured variable name in dynamic mode cannot be restored.
You need to set the argument ``use_structured_name=False`` when using
``Layer.set_state_dict`` later.
Args:
path(str|BytesIO) : The path/buffer to load the target object. Generally, the path is the target
file path. When loading state_dict from the saved result of the API used to save
the inference model, the path may be a file prefix or directory.
**configs (dict, optional): other load configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) model_filename (str): The inference model file name of the paddle 1.x
``save_inference_model`` save format. Default file name is :code:`__model__` .
(2) params_filename (str): The persistable variables file name of the paddle 1.x
``save_inference_model`` save format. No default file name, save variables separately
by default.
(3) return_numpy(bool): If specified as True, return tensor as numpy.ndarray, otherwise return tensor as paddle.Tensor.
Default False.
Returns:
Object(Object): a target object that can be used in paddle
Examples:
.. code-block:: python
# example 1: dynamic graph
import paddle
emb = paddle.nn.Embedding(10, 10)
layer_state_dict = emb.state_dict()
# save state_dict of emb
paddle.save(layer_state_dict, "emb.pdparams")
scheduler = paddle.optimizer.lr.NoamDecay(
d_model=0.01, warmup_steps=100, verbose=True)
adam = paddle.optimizer.Adam(
learning_rate=scheduler,
parameters=emb.parameters())
opt_state_dict = adam.state_dict()
# save state_dict of optimizer
paddle.save(opt_state_dict, "adam.pdopt")
# save weight of emb
paddle.save(emb.weight, "emb.weight.pdtensor")
# load state_dict of emb
load_layer_state_dict = paddle.load("emb.pdparams")
# load state_dict of optimizer
load_opt_state_dict = paddle.load("adam.pdopt")
# load weight of emb
load_weight = paddle.load("emb.weight.pdtensor")
# example 2: Load multiple state_dict at the same time
from paddle import nn
from paddle.optimizer import Adam
layer = paddle.nn.Linear(3, 4)
adam = Adam(learning_rate=0.001, parameters=layer.parameters())
obj = {'model': layer.state_dict(), 'opt': adam.state_dict(), 'epoch': 100}
path = 'example/model.pdparams'
paddle.save(obj, path)
obj_load = paddle.load(path)
# example 3: static graph
import paddle
import paddle.static as static
paddle.enable_static()
# create network
x = paddle.static.data(name="x", shape=[None, 224], dtype='float32')
z = paddle.static.nn.fc(x, 10)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
prog = paddle.static.default_main_program()
for var in prog.list_vars():
if list(var.shape) == [224, 10]:
tensor = var.get_value()
break
# save/load tensor
path_tensor = 'temp/tensor.pdtensor'
paddle.save(tensor, path_tensor)
load_tensor = paddle.load(path_tensor)
# save/load state_dict
path_state_dict = 'temp/model.pdparams'
paddle.save(prog.state_dict("param"), path_tensor)
load_state_dict = paddle.load(path_tensor)
# example 4: load program
import paddle
paddle.enable_static()
data = paddle.static.data(
name='x_static_save', shape=(None, 224), dtype='float32')
y_static = z = paddle.static.nn.fc(data, 10)
main_program = paddle.static.default_main_program()
path = "example/main_program.pdmodel"
paddle.save(main_program, path)
load_main = paddle.load(path)
print(load_main)
# example 5: save object to memory
from io import BytesIO
import paddle
from paddle.nn import Linear
paddle.disable_static()
linear = Linear(5, 10)
state_dict = linear.state_dict()
byio = BytesIO()
paddle.save(state_dict, byio)
tensor = paddle.randn([2, 3], dtype='float32')
paddle.save(tensor, byio)
byio.seek(0)
# load state_dict
dict_load = paddle.load(byio)
'''
if _is_memory_buffer(path) or os.path.isfile(path):
config = _parse_load_config(configs)
if six.PY2:
exception_type = KeyError
else:
exception_type = pickle.UnpicklingError
try:
with _open_file_buffer(path, 'rb') as f:
# When the value of a dict is larger than 4GB, there is a bug on macOS Python 3
if _is_file_path(
path
) and sys.platform == 'darwin' and sys.version_info.major == 3:
load_result = _pickle_loads_mac(path, f)
else:
load_result = pickle.load(f) if six.PY2 else pickle.load(
f, encoding='latin1')
# TODO(weixin):If `obj` is any object, the judgment condition should be more precise.
if isinstance(load_result, dict):
load_result = _pack_loaded_dict(load_result)
# paddle2.0: paddle.save/load
if "StructuredToParameterName@@" in load_result:
for key in load_result["StructuredToParameterName@@"]:
load_result[key] = _ndarray_to_tensor(
load_result[key], config.return_numpy)
if not config.keep_name_table and "StructuredToParameterName@@" in load_result:
del load_result["StructuredToParameterName@@"]
else:
# paddle2.1 static.save/load
load_result = _parse_load_result(load_result,
config.return_numpy)
else:
load_result = _parse_load_result(load_result,
config.return_numpy)
except exception_type as msg_pickle:
try:
tensor, _ = _load_selected_rows(path)
return tensor
except:
try:
tensor, _ = _load_lod_tensor(path)
if config.return_numpy:
return np.array(tensor)
else:
if in_dygraph_mode():
return _lod_tensor2varbase(tensor)
return tensor
except:
try:
with _open_file_buffer(path, "rb") as f:
program_desc_str = f.read()
program = Program.parse_from_string(
program_desc_str)
return program
except:
raise ValueError(
"`paddle.load` can not parse the file:{}.".format(
path))
else:
load_result = _legacy_load(path, **configs)
return load_result
def _legacy_load(path, **configs):
load_result = None
config = _parse_load_config(configs)
if os.path.isfile(path) or _is_memory_buffer(path):
# if the path is a file, we assume it was created by paddle.save
with _open_file_buffer(path, 'rb') as f:
load_result = pickle.load(f) if six.PY2 else pickle.load(
f, encoding='latin1')
load_result = _pack_loaded_dict(load_result)
if not config.keep_name_table and "StructuredToParameterName@@" in load_result:
del load_result["StructuredToParameterName@@"]
else:
# file prefix and directory are compatible cases
model_path, config = _build_load_path_and_config(path, config)
# check whether model file exists
if config.model_filename is None:
model_filename = '__model__'
else:
model_filename = config.model_filename
model_file_path = os.path.join(model_path, model_filename)
if os.path.exists(model_file_path):
# Load state dict by `jit.save/io.save_inference_model` save format
# NOTE(chenweihang): [ Compatibility of save_inference_model save format ]
# The model saved by `save_inference_model` does not completely correspond to
# the information required by the `state_dict` under the dygraph.
# `save_inference_model` not save structured name, we need to remind
# the user to configure the `use_structured_name` argument when `set_state_dict`
# NOTE(chenweihang): `jit.save` doesn't save optimizer state
load_result = _load_state_dict_from_save_inference_model(model_path,
config)
else:
# load state dict by `io.save_params/persistables` save format
# TODO(chenweihang): [ Now only supports loading parameters separately ]
# If users save all parameters as one file, the [ variable.name -> variable ]
# mapping info will be lost, so users need to give a variable list, but building a
# variable list in dygraph mode is difficult; we recommend users to use
# paddle.static.load_program_state in this case
load_result = _load_state_dict_from_save_params(model_path)
return load_result
| 39.126459
| 148
| 0.616106
|
aeb70a9855b53138d996fc668c94a90bddcfce66
| 2,460
|
py
|
Python
|
test/test_endpoints.py
|
sgnn7/conjur-api-python3
|
7b3adced83061eb34dcb41505c986c612b4a26ef
|
[
"Apache-2.0"
] | null | null | null |
test/test_endpoints.py
|
sgnn7/conjur-api-python3
|
7b3adced83061eb34dcb41505c986c612b4a26ef
|
[
"Apache-2.0"
] | null | null | null |
test/test_endpoints.py
|
sgnn7/conjur-api-python3
|
7b3adced83061eb34dcb41505c986c612b4a26ef
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from conjur.endpoints import ConjurEndpoint
class EndpointsTest(unittest.TestCase):
def test_endpoint_has_correct_authenticate_template_string(self):
auth_endpoint = ConjurEndpoint.AUTHENTICATE.value.format(url='http://host',
account='myacct',
login='mylogin')
self.assertEqual(auth_endpoint, 'http://host/authn/myacct/mylogin/authenticate')
def test_endpoint_has_correct_login_template_string(self):
auth_endpoint = ConjurEndpoint.LOGIN.value.format(url='http://host',
account='myacct')
self.assertEqual(auth_endpoint, 'http://host/authn/myacct/login')
def test_endpoint_has_correct_secrets_template_string(self):
auth_endpoint = ConjurEndpoint.SECRETS.value.format(url='http://host',
account='myacct',
kind='varkind',
identifier='varid')
self.assertEqual(auth_endpoint, 'http://host/secrets/myacct/varkind/varid')
def test_endpoint_has_correct_batch_secrets_template_string(self):
batch_endpoint = ConjurEndpoint.BATCH_SECRETS.value.format(url='http://host')
self.assertEqual(batch_endpoint, 'http://host/secrets')
def test_endpoint_has_correct_policy_template_string(self):
auth_endpoint = ConjurEndpoint.POLICIES.value.format(url='http://host',
account='myacct',
identifier='polid')
self.assertEqual(auth_endpoint, 'http://host/policies/myacct/policy/polid')
def test_endpoint_has_correct_resources_template_string(self):
auth_endpoint = ConjurEndpoint.RESOURCES.value.format(url='http://host',
account='myacct')
self.assertEqual(auth_endpoint, 'http://host/resources/myacct')
def test_endpoint_has_correct_whoami_template_string(self):
auth_endpoint = ConjurEndpoint.WHOAMI.value.format(url='http://host',
account='myacct')
self.assertEqual(auth_endpoint, 'http://host/whoami')
| 55.909091
| 88
| 0.569106
|
8db87bec2b1926c08a0d6b793051bc12eb80641f
| 15,872
|
py
|
Python
|
docs/tutorials/01_KalmanFilterTutorial.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | 1
|
2019-12-26T14:55:03.000Z
|
2019-12-26T14:55:03.000Z
|
docs/tutorials/01_KalmanFilterTutorial.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | null | null | null |
docs/tutorials/01_KalmanFilterTutorial.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""
==========================================================
1 - An introduction to Stone Soup: using the Kalman filter
==========================================================
"""
# %%
# This notebook is designed to introduce some of the basic features of Stone Soup using a single
# target scenario and a Kalman filter as an example.
#
# Background and notation
# -----------------------
#
# Let :math:`p(\mathbf{x}_{k})`, the probability distribution over
# :math:`\mathbf{x}_{k} \in \mathbb{R}^n`, represent a hidden state at some discrete point :math:`k`. The
# :math:`k` is most often interpreted as a timestep, but could be any sequential index (hence we
# don't use :math:`t`). The measurement is given by :math:`\mathbf{z}_{k} \in \mathbb{R}^m`.
#
# In Stone Soup, objects derived from the :class:`~.State` class carry information on a variety of
# states, whether they be hidden states, observations, ground truth. Minimally, a state will
# comprise a :class:`~.StateVector` and a timestamp. These can be extended. For example the
# :class:`~.GaussianState` is parameterised by a mean state vector and a covariance matrix. (We'll
# see this in action later.)
#
# Our goal is to infer :math:`p(\mathbf{x}_{k})`, given a sequence of measurements
# :math:`\mathbf{z}_{1}, ..., \mathbf{z}_{k}`, (which we'll write as :math:`\mathbf{z}_{1:k}`). In
# general :math:`\mathbf{z}` can include clutter and false alarms. We'll defer those complications
# to later tutorials and assume, for the moment, that all measurements are generated by a target.
#
# Prediction
# ^^^^^^^^^^
#
# We proceed under the Markovian assumption that :math:`p(\mathbf{x}_k) = \int_{-\infty} ^{\infty}
# p(\mathbf{x}_k|\mathbf{x}_{k-1}) p(\mathbf{x}_{k-1}) d \mathbf{x}_{k-1}`, meaning that the
# distribution over the state of an object at time :math:`k` can be predicted entirely from its
# state at previous time :math:`k-1`. If our understanding of :math:`p(\mathbf{x}_{k-1})` was informed
# by a series of measurements up to and including timestep :math:`k-1`, we can write
#
# .. math::
# p(\mathbf{x}_k|\mathbf{z}_{1:k-1}) =
# \int_{-\infty}^{\infty} p(\mathbf{x}_k|\mathbf{x}_{k-1})
# p(\mathbf{x}_{k-1}|\mathbf{z}_{1:k-1})d \mathbf{x}_{k-1}
#
# This is known as the *Chapman-Kolmogorov* equation. In Stone Soup we refer to this process as
# *prediction* and to an object that undertakes it as a :class:`~.Predictor`. A predictor requires
# a *state transition model*, namely a function which undertakes
# :math:`\mathbf{x}_{k|k-1} = f(\mathbf{x}_{k-1}, \mathbf{w}_k)`, where :math:`\mathbf{w}_k` is a
# noise term. Stone Soup has transition models derived from the :class:`~.TransitionModel` class.
#
# Update
# ^^^^^^
#
# We assume a sensor measurement is generated by some stochastic process represented by a function,
# :math:`\mathbf{z}_k = h(\mathbf{x}_k, \boldsymbol{\nu}_k)` where :math:`\boldsymbol{\nu}_k` is
# the noise.
#
# The goal of the update process is to generate the *posterior state estimate*, from the prediction
# and the measurement. It does this by way of Bayes' rule,
#
# .. math::
# p(\mathbf{x}_k | \mathbf{z}_{1:k}) =
# \frac{ p(\mathbf{z}_{k} | \mathbf{x}_k) p(\mathbf{x}_k | \mathbf{z}_{1:k-1})}
# {p(\mathbf{z}_k)}
#
# where :math:`p(\mathbf{x}_k | \mathbf{z}_{1:k-1})` is the output of the prediction stage,
# :math:`p(\mathbf{z}_{k} | \mathbf{x}_k)` is known as the likelihood, and :math:`p(\mathbf{z}_k)`
# the evidence. In Stone Soup, this calculation is undertaken by the :class:`~.Updater` class. Updaters use a
# :class:`~.MeasurementModel` class which models the effect of :math:`h(\cdot)`.
#
# We then proceed recursively, the posterior distribution at :math:`k-1` becoming the prior
# for the next measurement timestep, and so on
# %%
# A nearly-constant velocity example
# ----------------------------------
#
# We're going to set up a simple scenario in which a target moves at constant velocity with the
# addition of some random noise, (referred to as a *nearly constant velocity* model).
#
# As is customary in Python scripts we begin with some imports. (These ones allow us access to
# mathematical, timing and plotting functions.)
import numpy as np
from datetime import datetime, timedelta
# Figure to plot truth (and future data)
from matplotlib import pyplot as plt
# %%
# Simulate a target
# ^^^^^^^^^^^^^^^^^
#
# We consider a 2d Cartesian scheme where the state vector is
# :math:`[x \ \dot{x} \ y \ \dot{y}]^T`. To start we'll create a simple truth path, sampling at 1
# second intervals. We'll do this by employing one of Stone Soup's native transition models.
#
# These inputs are required:
from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState
from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \
ConstantVelocity
# And the clock starts
start_time = datetime.now()
# %%
# We note that it can sometimes be useful to fix our random number generator in order to probe a
# particular example repeatedly. We do that here by seeding it; comment out the next line to get a
# different realisation on each run.
np.random.seed(1991)
# %%
# The :class:`~.ConstantVelocity` class creates a one-dimensional constant velocity model with
# Gaussian noise. For this simulation :math:`\mathbf{x}_k = F_k \mathbf{x}_{k-1} + \mathbf{w}_k`,
# :math:`\mathbf{w}_k \sim \mathcal{N}(0,Q)`, with
#
# .. math::
# F_{k} &= \begin{bmatrix}
# 1 & \triangle t \\
# 0 & 1 \\
# \end{bmatrix} \\
# Q_k &= \begin{bmatrix}
# \frac{\triangle t^3}{3} & \frac{\triangle t^2}{2} \\
# \frac{\triangle t^2}{2} & \triangle t \\
# \end{bmatrix} q
#
# where :math:`q`, the input parameter to :class:`~.ConstantVelocity`, is the magnitude of the
# noise per :math:`\triangle t`-sized timestep.
# %%
# The :class:`~.CombinedLinearGaussianTransitionModel` class takes a number
# of 1d models and combines them in a linear Gaussian model of arbitrary dimension, :math:`D`.
#
# .. math::
# F_{k}^{D} &= \begin{bmatrix}
# F_k^{1} & & \mathbf{0} \\
# & \ddots & \\
# \mathbf{0} & & F_k^d \\
# \end{bmatrix}\\
# Q_{k}^{D} &= \begin{bmatrix}
# Q_k^{1} & & \mathbf{0} \\
# & \ddots & \\
# \mathbf{0} & & Q_k^d \\
# \end{bmatrix}
#
# We want a 2d simulation, so we'll do:
transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.05),
ConstantVelocity(0.05)])
# %%
# A 'truth path' is created starting at 0,0 moving to the NE
truth = GroundTruthPath([GroundTruthState([0, 1, 0, 1], timestamp=start_time)])
for k in range(1, 21):
truth.append(GroundTruthState(
transition_model.function(truth[k-1], noise=True, time_interval=timedelta(seconds=1)),
timestamp=start_time+timedelta(seconds=k)))
# %%
# Thus the ground truth is generated and we can plot the result
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.axis('equal')
ax.plot([state.state_vector[0] for state in truth],
[state.state_vector[2] for state in truth],
linestyle="--")
# %%
# We can check the :math:`F_k` and :math:`Q_k` matrices (generated over a 1s period).
transition_model.matrix(time_interval=timedelta(seconds=1))
# %%
transition_model.covar(time_interval=timedelta(seconds=1))
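# %%
# (Added sketch, not part of the original tutorial.) As a quick sanity check we can rebuild
# :math:`F_k` and :math:`Q_k` by hand from the formulas above, assuming :math:`\triangle t = 1` s
# and :math:`q = 0.05`, and compare them with the model's output.
dt, q = 1.0, 0.05
F_1d = np.array([[1, dt], [0, 1]])
Q_1d = q * np.array([[dt**3 / 3, dt**2 / 2], [dt**2 / 2, dt]])
# The combined 2d model is block-diagonal in the two 1d models
F_manual = np.kron(np.eye(2), F_1d)
Q_manual = np.kron(np.eye(2), Q_1d)
print(np.allclose(F_manual, transition_model.matrix(time_interval=timedelta(seconds=1))))
print(np.allclose(Q_manual, transition_model.covar(time_interval=timedelta(seconds=1))))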
# %%
# At this point you can play with the various parameters and see how it affects the simulated
# output.
# %%
# Simulate measurements
# ^^^^^^^^^^^^^^^^^^^^^
#
# We'll use one of Stone Soup's measurement models in order to generate
# measurements from the ground truth. For the moment we assume a 'linear' sensor which detects the
# position, but not velocity, of a target, such that
#
# :math:`\mathbf{z}_k = H_k \mathbf{x}_k + \boldsymbol{\nu}_k`,
# :math:`\boldsymbol{\nu}_k \sim \mathcal{N}(0,R)`, with
#
# .. math::
# H_k &= \begin{bmatrix}
# 1 & 0 & 0 & 0\\
# 0 & 0 & 1 & 0\\
# \end{bmatrix}\\
# R &= \begin{bmatrix}
# 1 & 0\\
# 0 & 1\\
# \end{bmatrix} \omega
#
# where :math:`\omega` is set to 5 initially (but again, feel free to play around).
# %%
# We're going to need a :class:`~.Detection` type to
# store the detections, and a :class:`~.LinearGaussian` measurement model.
from stonesoup.types.detection import Detection
from stonesoup.models.measurement.linear import LinearGaussian
# %%
# The linear Gaussian measurement model is set up by indicating the number of dimensions in the
# state vector and the dimensions that are measured (so specifying :math:`H_k`) and the noise
# covariance matrix :math:`R`.
measurement_model = LinearGaussian(
ndim_state=4, # Number of state dimensions (position and velocity in 2D)
mapping=(0, 2), # Mapping measurement vector index to state index
noise_covar=np.array([[5, 0], # Covariance matrix for Gaussian PDF
[0, 5]])
)
# %%
# Check the output is as we expect
measurement_model.matrix()
# %%
measurement_model.covar()
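# %%
# (Added sketch, not part of the original tutorial.) A noiseless measurement is simply
# :math:`H_k \mathbf{x}_k`; here we form one by hand from the first ground-truth state using the
# matrix we just inspected. We skip drawing :math:`\boldsymbol{\nu}_k` so the random stream used
# below is left untouched.
H = measurement_model.matrix()
print(H @ truth[0].state_vector)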
# %%
# Generate the measurements
measurements = []
for state in truth:
measurement = measurement_model.function(state, noise=True)
measurements.append(Detection(measurement, timestamp=state.timestamp))
# Plot the result
ax.scatter([state.state_vector[0] for state in measurements],
[state.state_vector[1] for state in measurements],
color='b')
fig
# %%
# At this stage you should have a moderately linear ground truth path (dotted line) with a series
# of simulated measurements overplotted (blue circles). Take a moment to fiddle with the numbers in
# :math:`Q` and :math:`R` to see what it does to the path and measurements.
# %%
# Construct a Kalman filter
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We're now ready to build a tracker. We'll use a Kalman filter as it's conceptually the simplest
# to start with. The Kalman filter is described extensively elsewhere [#]_, [#]_, so for the
# moment we just assert that the prediction step proceeds as:
#
# .. math::
# \mathbf{x}_{k|k-1} &= F_{k}\mathbf{x}_{k-1} + B_{k}\mathbf{u}_{k}\\
# P_{k|k-1} &= F_{k}P_{k-1}F_{k}^T + Q_{k},
#
# :math:`B_k \mathbf{u}_k` is a control term which we'll ignore for now (we assume we don't
# influence the target directly).
#
# The update step is:
#
# .. math::
# \mathbf{x}_{k} &= \mathbf{x}_{k|k-1} + K_k(\mathbf{z}_k - H_{k}\mathbf{x}_{k|k-1})\\
# P_{k} &= P_{k|k-1} - K_k H_{k} P_{k|k-1}\\
#
# where,
#
# .. math::
# K_k &= P_{k|k-1} H_{k}^T S_k^{-1}\\
# S_k &= H_{k} P_{k|k-1} H_{k}^T + R_{k}
#
# :math:`\mathbf{z}_k - H_{k}\mathbf{x}_{k|k-1}` is known as the *innovation* and :math:`S_k` the
# *innovation covariance*; :math:`K_k` is the *Kalman gain*.
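# %%
# (Added sketch, not part of the original tutorial.) The equations above can be run directly in
# plain NumPy. Below is a single hand-rolled predict/update step over a 1 second interval, using
# the matrices from our models, the first simulated measurement, and the same prior mean and
# covariance that we will hand to the Stone Soup tracker later; the control term is ignored as
# stated above.
F = transition_model.matrix(time_interval=timedelta(seconds=1))
Q = transition_model.covar(time_interval=timedelta(seconds=1))
H = measurement_model.matrix()
R = measurement_model.covar()
x_prev = np.array([[0.], [1.], [0.], [1.]])
P_prev = np.diag([1.5, 0.5, 1.5, 0.5])
# Predict
x_pred = F @ x_prev
P_pred = F @ P_prev @ F.T + Q
# Update
z = measurements[0].state_vector
S = H @ P_pred @ H.T + R
K = P_pred @ H.T @ np.linalg.inv(S)
x_post = x_pred + K @ (z - H @ x_pred)
P_post = P_pred - K @ H @ P_pred
print(x_post.ravel())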
# %%
# Constructing a predictor and updater in Stone Soup is simple. In a nice division of
# responsibility, a :class:`~.Predictor` takes a :class:`~.TransitionModel` as input and
# an :class:`~.Updater` takes a :class:`~.MeasurementModel` as input. Note that for now we're using
# the same models used to generate the ground truth and the simulated measurements. This won't
# usually be possible and it's an interesting exercise to explore what happens when these
# parameters are mismatched.
from stonesoup.predictor.kalman import KalmanPredictor
predictor = KalmanPredictor(transition_model)
from stonesoup.updater.kalman import KalmanUpdater
updater = KalmanUpdater(measurement_model)
# %%
# Run the Kalman filter
# ^^^^^^^^^^^^^^^^^^^^^
# Now we have the components, we can execute the Kalman filter estimator on the simulated data.
#
# In order to start, we'll need to create the first prior estimate. We're going to use the
# :class:`~.GaussianState` we mentioned earlier. As the name suggests, this parameterises the state
# as :math:`\mathcal{N}(\mathbf{x}_0, P_0)`. By happy chance the initial values are chosen to match
# the truth quite well. You might want to manipulate these to see what happens.
from stonesoup.types.state import GaussianState
prior = GaussianState([[0], [1], [0], [1]], np.diag([1.5, 0.5, 1.5, 0.5]), timestamp=start_time)
# %%
# In this instance data association is done somewhat implicitly. There is one prediction and
# one detection per timestep so no need to think too deeply. Stone Soup discourages such
# (undesirable) practice and requires that a :class:`~.Prediction` and :class:`~.Detection` are
# associated explicitly. This is done by way of a :class:`~.Hypothesis`, the most simple of which
# is a :class:`~.SingleHypothesis` which associates a single predicted state with a single
# detection. There is much more detail on how the :class:`~.Hypothesis` class is used in later
# tutorials.
from stonesoup.types.hypothesis import SingleHypothesis
# %%
# With this, we'll now loop through our measurements, predicting and updating at each timestep.
# Uncontroversially, a Predictor has a :meth:`predict` function and an Updater an :meth:`update` to
# do this. Storing the information is facilitated by the top-level :class:`~.Track` class which
# holds a sequence of states.
from stonesoup.types.track import Track
track = Track()
for measurement in measurements:
prediction = predictor.predict(prior, timestamp=measurement.timestamp)
hypothesis = SingleHypothesis(prediction, measurement) # Group a prediction and measurement
post = updater.update(hypothesis)
track.append(post)
prior = track[-1]
# %%
# Plot the resulting track, including uncertainty ellipses
ax.plot([state.state_vector[0] for state in track],
[state.state_vector[2] for state in track],
marker=".")
from matplotlib.patches import Ellipse
for state in track:
w, v = np.linalg.eig(measurement_model.matrix()@state.covar@measurement_model.matrix().T)
max_ind = np.argmax(w)
min_ind = np.argmin(w)
orient = np.arctan2(v[1,max_ind], v[0,max_ind])
ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
width=2*np.sqrt(w[max_ind]), height=2*np.sqrt(w[min_ind]),
angle=np.rad2deg(orient),
alpha=0.2,
color='r')
ax.add_artist(ellipse)
fig
# sphinx_gallery_thumbnail_number = 3
# %%
# Key points
# ----------
# 1. Stone Soup is built on a variety of types of :class:`~.State` object. These can be used to
# represent hidden states, observations, estimates, ground truth, and more.
# 2. Bayesian recursion is undertaken by the successive applications of predict and update methods
# using a :class:`~.Predictor` and an :class:`~.Updater`. Explicit association of predicted
# states with measurements is necessary. Broadly speaking predictors apply a
# :class:`~.TransitionModel`, data associators use a
# :class:`~.Hypothesiser` to associate a prediction with a measurement, and updaters use this
# association together with the :class:`~.MeasurementModel` to calculate the posterior state
# estimate.
# %%
# References
# ----------
# .. [#] Kalman 1960, A New Approach to Linear Filtering and Prediction Problems, Transactions of
# the ASME, Journal of Basic Engineering, 82 (series D), 35
# (https://pdfs.semanticscholar.org/bb55/c1c619c30f939fc792b049172926a4a0c0f7.pdf?_ga=2.51363242.2056055521.1592932441-1812916183.1592932441)
# .. [#] Anderson & Moore 2012, Optimal filtering,
# (http://users.cecs.anu.edu.au/~john/papers/BOOK/B02.PDF)
| 43.01355
| 148
| 0.657321
|
fc4e060006e0c6ee949d7a9384136631e6624592
| 2,891
|
py
|
Python
|
tools/data/textrecog/seg_synthtext_converter.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | 4
|
2021-07-18T22:31:57.000Z
|
2021-07-18T22:36:45.000Z
|
tools/data/textrecog/seg_synthtext_converter.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | null | null | null |
tools/data/textrecog/seg_synthtext_converter.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | 1
|
2021-05-16T03:58:52.000Z
|
2021-05-16T03:58:52.000Z
|
import argparse
import json
import os.path as osp
import cv2
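# Input format assumed by parse_old_label below (inferred from the parsing code; the example
# values are illustrative, not taken from the original dataset):
#   each line of --in-path:      "<image_path> <ann_path>"        e.g. "img/0001.jpg ann/0001.txt"
#   first line of each ann file: the word transcription           e.g. "Hello"
#   remaining lines:             one character box per character  e.g. "10 5 24 5 24 30 10 30"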
def parse_old_label(data_root, in_path, img_size=False):
imgid2imgname = {}
imgid2anno = {}
idx = 0
with open(in_path, 'r') as fr:
for line in fr:
line = line.strip().split()
img_full_path = osp.join(data_root, line[0])
if not osp.exists(img_full_path):
continue
ann_file = osp.join(data_root, line[1])
if not osp.exists(ann_file):
continue
img_info = {}
img_info['file_name'] = line[0]
if img_size:
img = cv2.imread(img_full_path)
h, w = img.shape[:2]
img_info['height'] = h
img_info['width'] = w
imgid2imgname[idx] = img_info
imgid2anno[idx] = []
char_annos = []
            # Read the per-image annotation file: first line is the text, the rest are char boxes
            with open(ann_file, 'r') as fr_ann:
                t = 0
                for ann_line in fr_ann:
                    ann_line = ann_line.strip()
                    if t == 0:
                        img_info['text'] = ann_line
                    else:
                        char_box = [float(x) for x in ann_line.split()]
                        char_text = img_info['text'][t - 1]
                        char_ann = dict(char_box=char_box, char_text=char_text)
                        char_annos.append(char_ann)
                    t += 1
imgid2anno[idx] = char_annos
idx += 1
return imgid2imgname, imgid2anno
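# gen_line_dict_file below emits one JSON object per line, roughly of this shape
# (values are illustrative):
#   {"file_name": "img/0001.jpg", "text": "Hello",
#    "annotations": [{"char_box": [10.0, 5.0, ...], "char_text": "H"}, ...]}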
def gen_line_dict_file(out_path, imgid2imgname, imgid2anno, img_size=False):
with open(out_path, 'w', encoding='utf-8') as fw:
for key, value in imgid2imgname.items():
if key in imgid2anno:
anno = imgid2anno[key]
line_dict = {}
line_dict['file_name'] = value['file_name']
line_dict['text'] = value['text']
if img_size:
line_dict['height'] = value['height']
line_dict['width'] = value['width']
line_dict['annotations'] = anno
line_dict_str = json.dumps(line_dict)
fw.write(line_dict_str + '\n')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root', help='data root for both image file and anno file')
parser.add_argument(
'--in-path',
help='mapping file of image_name and ann_file,'
' "image_name ann_file" in each line')
parser.add_argument(
'--out-path', help='output txt path with line-json format')
args = parser.parse_args()
return args
def main():
args = parse_args()
imgid2imgname, imgid2anno = parse_old_label(args.data_root, args.in_path)
gen_line_dict_file(args.out_path, imgid2imgname, imgid2anno)
print('finish')
if __name__ == '__main__':
main()
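# Example invocation (paths are illustrative placeholders):
# python tools/data/textrecog/seg_synthtext_converter.py \
#     --data-root data/synthtext \
#     --in-path data/synthtext/img_ann_map.txt \
#     --out-path data/synthtext/train_label.txt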
| 31.769231
| 79
| 0.526116
|
f2da5e597e0d91690381bb14609d1d02c0a70566
| 1,998
|
py
|
Python
|
etl/parser/hmdb.py
|
topomancy/gazetteer
|
726fdb258c30c95cf309d07ecad1c159bdfd57a9
|
[
"CC-BY-3.0",
"Apache-2.0"
] | 13
|
2015-01-07T14:37:01.000Z
|
2019-01-17T04:46:35.000Z
|
etl/parser/hmdb.py
|
topomancy/gazetteer
|
726fdb258c30c95cf309d07ecad1c159bdfd57a9
|
[
"CC-BY-3.0",
"Apache-2.0"
] | 1
|
2015-03-04T20:03:25.000Z
|
2015-03-04T20:03:25.000Z
|
etl/parser/hmdb.py
|
LibraryOfCongress/gazetteer
|
34c7f368c5ae51a167d698dbd3a092644eee4c0c
|
[
"MIT"
] | 6
|
2016-12-17T22:29:55.000Z
|
2018-03-01T19:50:19.000Z
|
import sys, json, os, datetime
from shapely.geometry import asShape, mapping
from fiona import collection
from core import Dump
import core
import codecs
#name, cmt, desc, link1_href
def extract_shapefile(shapefile, uri_name, simplify_tolerance=None):
for feature in collection(shapefile, "r"):
geometry = feature["geometry"]
properties = feature["properties"]
        # Calculate the centroid; for point features the coordinates already give the centroid
        geom_obj = asShape(geometry)
        centroid = feature["geometry"]["coordinates"]
        name = properties["name"]
        address = {
            # the "cmt" attribute lives in the feature's properties, not on the record itself
            "street": properties.get("cmt")
        }
#alternate names
alternates = []
feature_code = "HSTS"
source = properties #keep all fields anyhow
# unique URI which internally gets converted to the place id.
        uri = properties.get("link1_href") + "#" + feature["id"]
timeframe = {}
updated = datetime.datetime.utcnow().replace(second=0, microsecond=0).isoformat()
place = {
"name":name,
"centroid":centroid,
"feature_code": feature_code,
"geometry":geometry,
"is_primary": True,
"source": source,
"alternate": alternates,
"updated": updated,
"uris":[uri],
"relationships": [],
"timeframe":timeframe,
"admin":[]
}
dump.write(uri, place)
if __name__ == "__main__":
shapefile, dump_path = sys.argv[1:3]
uri_name = "http://www.hmdb.org/"
    #simplify_tolerance = .01  # ~ 1.1km (.001 = 111m)
simplify_tolerance = None
dump_basename = os.path.basename(shapefile)
dump = Dump(dump_path + "/shapefile/"+ dump_basename + ".%04d.json.gz")
    dump.max_rows = 1000
extract_shapefile(shapefile, uri_name, simplify_tolerance)
dump.close()
#python hmdb.py ../../../hmdb.shp hmdbdump
| 25.291139
| 89
| 0.568068
|
1c4237c8308eb73fae7beab7359bba15be44693b
| 306
|
py
|
Python
|
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | 4
|
2017-12-25T10:36:15.000Z
|
2018-01-01T10:42:34.000Z
|
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | null | null | null |
setup.py
|
ipkn/somebox
|
1fedaa07236402269b8ad10dc9563f3d90aaead1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(name='somebox',
version='0.0.1',
description='Dropbox-like file sharing service',
author='Jaeseung Ha',
author_email='ipknhama@gmail.com',
url='https://github.com/ipkn/somebox',
packages=['somebox'],
)
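# Example usage with the standard distutils workflow (not from the original repo docs):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install into the current environment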
| 23.538462
| 54
| 0.640523
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.