Schema of the dataset (column, dtype, observed values; "nullable" marks columns that contain nulls):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 – 209 |
| max_stars_repo_name | string | lengths 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | lengths 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | lengths 4 – 209 |
| max_issues_repo_name | string | lengths 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | lengths 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | lengths 4 – 209 |
| max_forks_repo_name | string | lengths 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | lengths 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | lengths 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
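The last three columns are derived from `content`. Their exact definitions are not stated in the schema, so the sketch below is an assumption, chosen because it reproduces the values shown in the rows that follow (for example the `tests/test_files/non_callable.py` row):

```python
# Hedged sketch: assumed definitions of the three derived columns.
def derived_stats(content: str) -> dict:
    lines = content.split("\n")
    return {
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

# The 10-byte file "a = 5\na()\n" (the non_callable.py row below) yields
# avg_line_length 3.33..., max_line_length 5, alphanum_fraction 0.3.
print(derived_stats("a = 5\na()\n"))
```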
Row 1: `examples/djangodesktop/gui.py` (ext py, lang Python, size 63, hexsha `7205cdf610d822ff489ee20bd6142881de7bb422`)
- stars: repo `rafaelalmeida2909/flaskwebgui` @ `9c09a23d18bcf364e382bd9fda0b91cf91d37f8f`, licenses ["MIT"], count null, events null
- issues: repo `rafaelalmeida2909/flaskwebgui` @ `9c09a23d18bcf364e382bd9fda0b91cf91d37f8f`, licenses ["MIT"], count null, events null
- forks: repo `rafaelalmeida2909/flaskwebgui` @ `9c09a23d18bcf364e382bd9fda0b91cf91d37f8f`, licenses ["MIT"], count null, events null
- content:

```python
from flaskwebgui import FlaskUI
FlaskUI(server='django').run()
```

- avg_line_length: 21, max_line_length: 31, alphanum_fraction: 0.793651
Row 2: `cs3api4lab/exception/exceptions.py` (ext py, lang Python, size 933, hexsha `418654f908de94fce36c3993b9e6c282cc674201`)
- stars: repo `michzimny/cs3api4lab` @ `c2e2516da1e48a865899a7e16944878c8a248936`, licenses ["Apache-2.0"], count 2, events 2020-08-10T13:12:57.000Z to 2020-11-26T15:44:48.000Z
- issues: repo `michzimny/cs3api4lab` @ `c2e2516da1e48a865899a7e16944878c8a248936`, licenses ["Apache-2.0"], count 73, events 2020-06-15T22:58:47.000Z to 2022-03-31T08:24:36.000Z
- forks: repo `michzimny/cs3api4lab` @ `c2e2516da1e48a865899a7e16944878c8a248936`, licenses ["Apache-2.0"], count 11, events 2020-06-22T10:10:36.000Z to 2022-02-03T14:05:16.000Z
- content:

```python
class ShareAlreadyExistsError(Exception):
    def __init__(self, message):
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return self.__class__.__name__ + ": " + self.message


class ShareNotExistsError(Exception):
    def __init__(self, message):
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return self.__class__.__name__ + ": " + self.message


class InvalidTypeError(Exception):
    def __init__(self, message):
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return self.__class__.__name__ + ": " + self.message


class ParamError(Exception):
    def __init__(self, key_error):
        self.message = "Missing argument: " + str(key_error)
        super().__init__(self.message)

    def __str__(self):
        return self.__class__.__name__ + ": " + self.message
```

- avg_line_length: 27.441176, max_line_length: 60, alphanum_fraction: 0.653805
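A small usage sketch (not from the repository) showing how these exception classes behave; it assumes the classes defined above are in scope:

```python
# Hypothetical usage of ShareNotExistsError as defined above.
try:
    raise ShareNotExistsError("share 'project-x' was not found")
except ShareNotExistsError as err:
    print(err)  # -> ShareNotExistsError: share 'project-x' was not found
```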
Row 3: `custom_components/xiaomi_cloud_map_extractor/common/vacuum_v2.py` (ext py, lang Python, size 840, hexsha `9ef497356372493e2d79a8f4c10c5e690172875a`)
- stars: repo `dmr1987/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor` @ `a1a636db9e3bba2479c3760ec70a3b07dc996e02`, licenses ["MIT"], count 163, events 2020-08-01T12:19:46.000Z to 2022-03-28T09:04:57.000Z
- issues: repo `Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor` @ `7bc868278f74fdaba475987dd5fdf485e430fe53`, licenses ["MIT"], count 81, events 2020-08-04T00:28:46.000Z to 2022-03-29T15:48:51.000Z
- forks: repo `Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor` @ `7bc868278f74fdaba475987dd5fdf485e430fe53`, licenses ["MIT"], count 28, events 2020-08-02T12:02:24.000Z to 2022-03-22T00:07:34.000Z
- content:

```python
from custom_components.xiaomi_cloud_map_extractor.common.vacuum import XiaomiCloudVacuum


class XiaomiCloudVacuumV2(XiaomiCloudVacuum):

    def __init__(self, connector, country, user_id, device_id, model):
        super().__init__(connector, country, user_id, device_id, model)

    def get_map_url(self, map_name):
        url = self._connector.get_api_url(self._country) + '/v2/home/get_interim_file_url'
        params = {
            "data": f'{{"obj_name":"{self._user_id}/{self._device_id}/{map_name}"}}'
        }
        api_response = self._connector.execute_api_call_encrypted(url, params)
        if api_response is None or "result" not in api_response or "url" not in api_response["result"]:
            return None
        return api_response["result"]["url"]

    def should_get_map_from_vacuum(self):
        return False
```

- avg_line_length: 40, max_line_length: 103, alphanum_fraction: 0.696429
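The doubled braces in the `data` f-string above are Python's escape for literal `{` and `}`; the sketch below, with made-up identifiers, shows the JSON string the request parameter expands to:

```python
# Illustration only: the identifiers are placeholders, not real device data.
user_id, device_id, map_name = "12345", "67890", "map_slot_0"
data = f'{{"obj_name":"{user_id}/{device_id}/{map_name}"}}'
print(data)  # {"obj_name":"12345/67890/map_slot_0"}
```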
Row 4: `roles/aur/library/aur.py` (ext py, lang Python, size 8,045, hexsha `1fc760ded06005df2d9bfddf0a92d762b64c5970`)
- stars: repo `ljmf00/home-server` @ `e0b92688babfa087b109a3f277baf408cc983101`, licenses ["MIT"], count 1, events 2021-06-16T14:07:09.000Z to 2021-06-16T14:07:09.000Z
- issues: repo `ljmf00/home-infrastructure` @ `e0b92688babfa087b109a3f277baf408cc983101`, licenses ["MIT"], count 5, events 2020-04-10T17:34:59.000Z to 2020-05-17T17:16:32.000Z
- forks: repo `ljmf00/home-server` @ `e0b92688babfa087b109a3f277baf408cc983101`, licenses ["MIT"], count 1, events 2020-04-16T04:15:48.000Z to 2020-04-16T04:15:48.000Z
- content:

```python
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import open_url
import json
import tarfile
import os
import os.path
import tempfile
import urllib.parse
DOCUMENTATION = '''
---
module: aur
short_description: Manage packages from the AUR
description:
- Manage packages from the Arch User Repository (AUR)
author:
- Kewl <xrjy@nygb.rh.bet(rot13)>
options:
name:
description:
- Name or list of names of the package(s) to install or upgrade.
upgrade:
description:
- Whether or not to upgrade whole system.
type: bool
default: no
use:
description:
- The helper to use, 'auto' uses the first known helper found and makepkg as a fallback.
default: auto
choices: [ auto, yay, pacaur, trizen, pikaur, aurman, makepkg ]
skip_installed:
description:
- Skip operations if the package is present.
type: bool
default: no
skip_pgp_check:
description:
- Only valid with makepkg.
Skip PGP signatures verification of source file.
This is useful when installing packages without GnuPG (properly) configured.
type: bool
default: no
ignore_arch:
description:
- Only valid with makepkg.
Ignore a missing or incomplete arch field, useful when the PKGBUILD does not have the arch=('yourarch') field.
type: bool
default: no
aur_only:
description:
- Limit operation to the AUR. Compatible with yay, aurman, pacaur and trizen.
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
'''
RETURN = '''
msg:
description: action that has been taken
helper:
the helper that was actually used
'''
EXAMPLES = '''
- name: Install trizen using makepkg, skip if trizen is already installed
aur: name=trizen use=makepkg skip_installed=true
become: yes
become_user: aur_builder
'''
def_lang = ['env', 'LC_ALL=C']
use_cmd = {
'yay': ['yay', '-S', '--noconfirm', '--needed', '--cleanafter'],
'pacaur': ['pacaur', '-S', '--noconfirm', '--noedit', '--needed'],
'trizen': ['trizen', '-S', '--noconfirm', '--noedit', '--needed'],
'pikaur': ['pikaur', '-S', '--noconfirm', '--noedit', '--needed'],
'aurman': ['aurman', '-S', '--noconfirm', '--noedit', '--needed', '--skip_news', '--pgp_fetch', '--skip_new_locations'],
'makepkg': ['makepkg', '--syncdeps', '--install', '--noconfirm', '--needed']
}
has_aur_option = ['yay', 'pacaur', 'trizen', 'aurman']
def package_installed(module, package):
"""
Determine if the package is already installed
"""
rc, _, _ = module.run_command(['pacman', '-Q', package], check_rc=False)
return rc == 0
def check_packages(module, packages):
"""
Inform the user what would change if the module were run
"""
would_be_changed = []
for package in packages:
installed = package_installed(module, package)
if not installed:
would_be_changed.append(package)
if would_be_changed:
status = True
if len(packages) > 1:
message = '{} package(s) would be installed'.format(len(would_be_changed))
else:
message = 'package would be installed'
else:
status = False
if len(packages) > 1:
message = 'all packages are already installed'
else:
message = 'package is already installed'
module.exit_json(changed=status, msg=message)
def install_with_makepkg(module, package):
"""
Install the specified package with makepkg
"""
module.get_bin_path('fakeroot', required=True)
f = open_url('https://aur.archlinux.org/rpc/?v=5&type=info&arg={}'.format(urllib.parse.quote(package)))
result = json.loads(f.read().decode('utf8'))
if result['resultcount'] != 1:
return (1, '', 'package {} not found'.format(package))
result = result['results'][0]
f = open_url('https://aur.archlinux.org/{}'.format(result['URLPath']))
current_path = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
tar_file = '{}.tar.gz'.format(result['Name'])
with open(tar_file, 'wb') as out:
out.write(f.read())
tar = tarfile.open(tar_file)
tar.extractall()
tar.close()
os.chdir(format(result['Name']))
if module.params['skip_pgp_check']:
use_cmd['makepkg'].append('--skippgpcheck')
if module.params['ignore_arch']:
use_cmd['makepkg'].append('--ignorearch')
rc, out, err = module.run_command(use_cmd['makepkg'], check_rc=True)
os.chdir(current_path)
return (rc, out, err)
def upgrade(module, use, aur_only):
"""
Upgrade the whole system
"""
assert use in use_cmd
rc, out, err = module.run_command(def_lang + use_cmd[use] + ['--aur' if (aur_only and use in has_aur_option) else None] + ['-u'], check_rc=True)
module.exit_json(
changed=not (out == '' or 'nothing to do' in out or 'No AUR updates found' in out),
msg='upgraded system',
helper=use,
)
def install_packages(module, packages, use, skip_installed, aur_only):
"""
Install the specified packages
"""
assert use in use_cmd
changed_iter = False
for package in packages:
if skip_installed:
if package_installed(module, package):
rc = 0
continue
if use == 'makepkg':
rc, out, err = install_with_makepkg(module, package)
else:
rc, out, err = module.run_command(def_lang + use_cmd[use] + ['--aur' if (aur_only and use in has_aur_option) else None] + [package], check_rc=True)
changed_iter = changed_iter or not (out == '' or '-- skipping' in out or 'nothing to do' in out)
message = 'installed package(s)' if changed_iter else 'package(s) already installed'
module.exit_json(
changed=changed_iter,
msg=message if not rc else err,
helper=use,
rc=rc,
)
def main():
module = AnsibleModule(
argument_spec={
'name': {
'type': 'list',
},
'ignore_arch': {
'default': False,
'type': 'bool',
},
'upgrade': {
'default': False,
'type': 'bool',
},
'use': {
'default': 'auto',
'choices': ['auto'] + list(use_cmd.keys()),
},
'skip_installed': {
'default': False,
'type': 'bool',
},
'skip_pgp_check': {
'default': False,
'type': 'bool',
},
'aur_only': {
'default': False,
'type': 'bool',
},
},
required_one_of=[['name', 'upgrade']],
supports_check_mode=True
)
params = module.params
if module.check_mode:
check_packages(module, params['name'])
if params['use'] == 'auto':
use = 'makepkg'
# auto: select the first helper for which the bin is found
for k in use_cmd:
if module.get_bin_path(k):
use = k
break
else:
use = params['use']
if params['upgrade'] and (params['name'] or params['skip_installed'] or use == 'makepkg'):
module.fail_json(msg="Upgrade cannot be used with this option.")
else:
if params['upgrade']:
upgrade(module, use, params['aur_only'])
else:
install_packages(module, params['name'], use, params['skip_installed'], params['aur_only'])
if __name__ == '__main__':
main()
```

- avg_line_length: 30.018657, max_line_length: 159, alphanum_fraction: 0.579366
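For orientation, this is roughly the command list `install_packages()` above assembles when an AUR helper is used; the helper, flag, and package name are only an illustration of the `def_lang + use_cmd[use] + [...]` concatenation:

```python
# Hypothetical composition for use='yay', aur_only=True, package='ripgrep-git'.
def_lang = ['env', 'LC_ALL=C']
yay = ['yay', '-S', '--noconfirm', '--needed', '--cleanafter']
cmd = def_lang + yay + ['--aur'] + ['ripgrep-git']
print(cmd)
# ['env', 'LC_ALL=C', 'yay', '-S', '--noconfirm', '--needed',
#  '--cleanafter', '--aur', 'ripgrep-git']
```

Note that when `aur_only` is false, the code above places `None` in that slot rather than omitting the element.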
Row 5: `whatsapp-bot-venv/Lib/site-packages/twilio/rest/pricing/v1/voice/country.py` (ext py, lang Python, size 11,081, hexsha `b3c15672c92377af1a2b79a15bec6f7f095f332c`)
- stars: repo `RedaMastouri/ConversationalPythonicChatBot` @ `f204276d4b80348d42091b17d1a7d9eea33fb4e0`, licenses ["MIT"], count null, events null
- issues: repo `RedaMastouri/ConversationalPythonicChatBot` @ `f204276d4b80348d42091b17d1a7d9eea33fb4e0`, licenses ["MIT"], count null, events null
- forks: repo `RedaMastouri/ConversationalPythonicChatBot` @ `f204276d4b80348d42091b17d1a7d9eea33fb4e0`, licenses ["MIT"], count null, events null
- content:

```python
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CountryList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the CountryList
:param Version version: Version that contains the resource
:returns: twilio.rest.pricing.v1.voice.country.CountryList
:rtype: twilio.rest.pricing.v1.voice.country.CountryList
"""
super(CountryList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Voice/Countries'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams CountryInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.pricing.v1.voice.country.CountryInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists CountryInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.pricing.v1.voice.country.CountryInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CountryInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CountryPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CountryInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CountryPage(self._version, response, self._solution)
def get(self, iso_country):
"""
Constructs a CountryContext
:param iso_country: The ISO country code
:returns: twilio.rest.pricing.v1.voice.country.CountryContext
:rtype: twilio.rest.pricing.v1.voice.country.CountryContext
"""
return CountryContext(self._version, iso_country=iso_country, )
def __call__(self, iso_country):
"""
Constructs a CountryContext
:param iso_country: The ISO country code
:returns: twilio.rest.pricing.v1.voice.country.CountryContext
:rtype: twilio.rest.pricing.v1.voice.country.CountryContext
"""
return CountryContext(self._version, iso_country=iso_country, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Pricing.V1.CountryList>'
class CountryPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the CountryPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.pricing.v1.voice.country.CountryPage
:rtype: twilio.rest.pricing.v1.voice.country.CountryPage
"""
super(CountryPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CountryInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.pricing.v1.voice.country.CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryInstance
"""
return CountryInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Pricing.V1.CountryPage>'
class CountryContext(InstanceContext):
""" """
def __init__(self, version, iso_country):
"""
Initialize the CountryContext
:param Version version: Version that contains the resource
:param iso_country: The ISO country code
:returns: twilio.rest.pricing.v1.voice.country.CountryContext
:rtype: twilio.rest.pricing.v1.voice.country.CountryContext
"""
super(CountryContext, self).__init__(version)
# Path Solution
self._solution = {'iso_country': iso_country, }
self._uri = '/Voice/Countries/{iso_country}'.format(**self._solution)
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CountryInstance(self._version, payload, iso_country=self._solution['iso_country'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Pricing.V1.CountryContext {}>'.format(context)
class CountryInstance(InstanceResource):
""" """
def __init__(self, version, payload, iso_country=None):
"""
Initialize the CountryInstance
:returns: twilio.rest.pricing.v1.voice.country.CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryInstance
"""
super(CountryInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'country': payload.get('country'),
'iso_country': payload.get('iso_country'),
'outbound_prefix_prices': payload.get('outbound_prefix_prices'),
'inbound_call_prices': payload.get('inbound_call_prices'),
'price_unit': payload.get('price_unit'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'iso_country': iso_country or self._properties['iso_country'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CountryContext for this CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryContext
"""
if self._context is None:
self._context = CountryContext(self._version, iso_country=self._solution['iso_country'], )
return self._context
@property
def country(self):
"""
:returns: The name of the country
:rtype: unicode
"""
return self._properties['country']
@property
def iso_country(self):
"""
:returns: The ISO country code
:rtype: unicode
"""
return self._properties['iso_country']
@property
def outbound_prefix_prices(self):
"""
:returns: The list of OutboundPrefixPrice records
:rtype: unicode
"""
return self._properties['outbound_prefix_prices']
@property
def inbound_call_prices(self):
"""
:returns: The list of InboundCallPrice records
:rtype: unicode
"""
return self._properties['inbound_call_prices']
@property
def price_unit(self):
"""
:returns: The currency in which prices are measured, in ISO 4127 format (e.g. usd, eur, jpy)
:rtype: unicode
"""
return self._properties['price_unit']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a CountryInstance
:returns: Fetched CountryInstance
:rtype: twilio.rest.pricing.v1.voice.country.CountryInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Pricing.V1.CountryInstance {}>'.format(context)
```

- avg_line_length: 32.784024, max_line_length: 102, alphanum_fraction: 0.62359
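This file is auto-generated twilio-python resource code; application code normally reaches `CountryList`/`CountryContext` through the top-level client rather than instantiating them directly. A hedged sketch with placeholder credentials:

```python
# Sketch assuming the standard twilio-python Client; credentials are fake.
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# CountryList.list() / stream() page through CountryInstance records.
for country in client.pricing.v1.voice.countries.list(limit=3):
    print(country.iso_country, country.price_unit)

# CountryContext.fetch() retrieves a single country by ISO code.
us = client.pricing.v1.voice.countries("US").fetch()
print(us.outbound_prefix_prices)
```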
Row 6: `example_specs/models/unet2d_multi_tensor/multi_tensor_unet.py` (ext py, lang Python, size 12,128, hexsha `27ab7a9649a7684ea8f61d604ec9bcd54986e9d2`)
- stars: repo `bioimage-io/spec-bioimage-io` @ `b534001c67b3903dbd47f80a8b3ff027cd249119`, licenses ["MIT"], count 6, events 2021-05-10T09:08:12.000Z to 2022-03-27T17:44:08.000Z
- issues: repo `bioimage-io/spec-bioimage-io` @ `b534001c67b3903dbd47f80a8b3ff027cd249119`, licenses ["MIT"], count 227, events 2021-03-16T23:40:13.000Z to 2022-03-17T14:30:53.000Z
- forks: repo `bioimage-io/spec-bioimage-io` @ `b534001c67b3903dbd47f80a8b3ff027cd249119`, licenses ["MIT"], count 5, events 2021-03-26T14:16:49.000Z to 2021-11-28T11:24:40.000Z
- content:

```python
import torch
import torch.nn as nn
class UNetBase(nn.Module):
""" """
def __init__(self, encoder, base, decoder, out_conv=None, final_activation=None):
super().__init__()
if len(encoder) != len(decoder):
raise ValueError(f"Incompatible depth of encoder (depth={len(encoder)}) and decoder (depth={len(decoder)})")
self.encoder = encoder
self.base = base
self.decoder = decoder
if out_conv is None:
self._out_channels = self.decoder.out_channels
else:
self._out_channels = out_conv.out_channels
self.out_conv = out_conv
self.final_activation = self._get_activation(final_activation)
@property
def in_channels(self):
return self.encoder.in_channels
@property
def out_channels(self):
return self._out_channels
@property
def depth(self):
return len(self.encoder)
def _get_activation(self, activation):
return_activation = None
if activation is None:
return None
if isinstance(activation, nn.Module):
return activation
if isinstance(activation, str):
return_activation = getattr(nn, activation, None)
if return_activation is None:
raise ValueError(f"Invalid activation: {activation}")
return return_activation()
# load encoder / decoder / base states for pretraining
def load_encoder_state(self, state):
self.encoder.load_state_dict(state)
def load_decoder_state(self, state):
self.decoder.load_state_dict(state)
def load_base_state(self, state):
self.base.load_state_dict(state)
def _apply_default(self, x):
self.encoder.return_outputs = True
self.decoder.return_outputs = False
x, encoder_out = self.encoder(x)
x = self.base(x)
x = self.decoder(x, encoder_inputs=encoder_out[::-1])
if self.out_conv is not None:
x = self.out_conv(x)
if self.final_activation is not None:
x = self.final_activation(x)
return x
def forward(self, *x):
assert isinstance(x, (list, tuple)), type(x)
# fix issue in onnx export
if isinstance(x[0], list) and len(x) == 1:
x = x[0]
assert len(x) == self.in_channels, f"{len(x)}, {self.in_channels}"
x = torch.cat(x, dim=1)
out = self._apply_default(x)
assert out.shape[1] == self.out_channels
return [out[:, i : i + 1] for i in range(out.shape[1])]
def _update_conv_kwargs(kwargs, scale_factor):
# if the scale factor is a scalar or all entries are the same we don't need to update the kwargs
if isinstance(scale_factor, int) or scale_factor.count(scale_factor[0]) == len(scale_factor):
return kwargs
else: # otherwise set anisotropic kernel
kernel_size = kwargs.get("kernel_size", 3)
padding = kwargs.get("padding", 1)
# bail out if kernel size or padding aren't scalars, because it's
# unclear what to do in this case
if not (isinstance(kernel_size, int) and isinstance(padding, int)):
return kwargs
kernel_size = tuple(1 if factor == 1 else kernel_size for factor in scale_factor)
padding = tuple(0 if factor == 1 else padding for factor in scale_factor)
kwargs.update({"kernel_size": kernel_size, "padding": padding})
return kwargs
class Encoder(nn.Module):
def __init__(
self, features, scale_factors, conv_block_impl, pooler_impl, anisotropic_kernel=False, **conv_block_kwargs
):
super().__init__()
if len(features) != len(scale_factors) + 1:
raise ValueError("Incompatible number of features {len(features)} and scale_factors {len(scale_factors)}")
conv_kwargs = [conv_block_kwargs] * len(scale_factors)
if anisotropic_kernel:
conv_kwargs = [
_update_conv_kwargs(kwargs, scale_factor) for kwargs, scale_factor in zip(conv_kwargs, scale_factors)
]
self.blocks = nn.ModuleList(
[
conv_block_impl(inc, outc, **kwargs)
for inc, outc, kwargs in zip(features[:-1], features[1:], conv_kwargs)
]
)
self.poolers = nn.ModuleList([pooler_impl(factor) for factor in scale_factors])
self.return_outputs = True
self.in_channels = features[0]
self.out_channels = features[-1]
def __len__(self):
return len(self.blocks)
def forward(self, x):
encoder_out = []
for block, pooler in zip(self.blocks, self.poolers):
x = block(x)
encoder_out.append(x)
x = pooler(x)
if self.return_outputs:
return x, encoder_out
else:
return x
class Decoder(nn.Module):
def __init__(
self, features, scale_factors, conv_block_impl, sampler_impl, anisotropic_kernel=False, **conv_block_kwargs
):
super().__init__()
if len(features) != len(scale_factors) + 1:
raise ValueError("Incompatible number of features {len(features)} and scale_factors {len(scale_factors)}")
conv_kwargs = [conv_block_kwargs] * len(scale_factors)
if anisotropic_kernel:
conv_kwargs = [
_update_conv_kwargs(kwargs, scale_factor) for kwargs, scale_factor in zip(conv_kwargs, scale_factors)
]
self.blocks = nn.ModuleList(
[
conv_block_impl(inc, outc, **kwargs)
for inc, outc, kwargs in zip(features[:-1], features[1:], conv_kwargs)
]
)
self.samplers = nn.ModuleList(
[sampler_impl(factor, inc, outc) for factor, inc, outc in zip(scale_factors, features[:-1], features[1:])]
)
self.return_outputs = False
self.in_channels = features[0]
self.out_channels = features[-1]
def __len__(self):
return len(self.blocks)
# FIXME this prevents traces from being valid for other input sizes, need to find
# a solution to traceable cropping
def _crop(self, x, shape):
shape_diff = [(xsh - sh) // 2 for xsh, sh in zip(x.shape, shape)]
crop = tuple([slice(sd, xsh - sd) for sd, xsh in zip(shape_diff, x.shape)])
return x[crop]
# # Implementation with torch.narrow, does not fix the tracing warnings!
# for dim, (sh, sd) in enumerate(zip(shape, shape_diff)):
# x = torch.narrow(x, dim, sd, sh)
# return x
def _concat(self, x1, x2):
return torch.cat([x1, self._crop(x2, x1.shape)], dim=1)
def forward(self, x, encoder_inputs):
if len(encoder_inputs) != len(self.blocks):
raise ValueError(f"Invalid number of encoder_inputs: expect {len(self.blocks)}, got {len(encoder_inputs)}")
decoder_out = []
for block, sampler, from_encoder in zip(self.blocks, self.samplers, encoder_inputs):
x = sampler(x)
x = block(self._concat(x, from_encoder))
decoder_out.append(x)
if self.return_outputs:
return decoder_out + [x]
else:
return x
def get_norm_layer(norm, dim, channels, n_groups=32):
if norm is None:
return None
if norm == "InstanceNorm":
return nn.InstanceNorm2d(channels) if dim == 2 else nn.InstanceNorm3d(channels)
elif norm == "GroupNorm":
return nn.GroupNorm(min(n_groups, channels), channels)
elif norm == "BatchNorm":
return nn.BatchNorm2d(channels) if dim == 2 else nn.BatchNorm3d(channels)
else:
raise ValueError(f"Invalid norm: expect one of 'InstanceNorm', 'BatchNorm' or 'GroupNorm', got {norm}")
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dim, kernel_size=3, padding=1, norm=None):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
conv = nn.Conv2d if dim == 2 else nn.Conv3d
if norm is None:
self.block = nn.Sequential(
conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
conv(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
)
else:
self.block = nn.Sequential(
get_norm_layer(norm, dim, in_channels),
conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
get_norm_layer(norm, dim, out_channels),
conv(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.block(x)
class Upsampler(nn.Module):
def __init__(self, scale_factor, in_channels, out_channels, dim, mode):
super().__init__()
self.mode = mode
self.scale_factor = scale_factor
conv = nn.Conv2d if dim == 2 else nn.Conv3d
self.conv = conv(in_channels, out_channels, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False)
x = self.conv(x)
return x
#
# 2d unet implementations
#
class ConvBlock2d(ConvBlock):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__(in_channels, out_channels, dim=2, **kwargs)
class Upsampler2d(Upsampler):
def __init__(self, scale_factor, in_channels, out_channels, mode="bilinear"):
super().__init__(scale_factor, in_channels, out_channels, dim=2, mode=mode)
class MultiTensorUNet(UNetBase):
def __init__(
self,
in_channels,
out_channels,
depth=4,
initial_features=32,
gain=2,
final_activation=None,
return_side_outputs=False,
conv_block_impl=ConvBlock2d,
pooler_impl=nn.MaxPool2d,
sampler_impl=Upsampler2d,
**conv_block_kwargs,
):
features_encoder = [in_channels] + [initial_features * gain ** i for i in range(depth)]
features_decoder = [initial_features * gain ** i for i in range(depth + 1)][::-1]
scale_factors = depth * [2]
if return_side_outputs:
if isinstance(out_channels, int) or out_channels is None:
out_channels = [out_channels] * depth
if len(out_channels) != depth:
raise ValueError()
out_conv = nn.ModuleList(
[nn.Conv2d(feat, outc, 1) for feat, outc in zip(features_decoder[1:], out_channels)]
)
else:
out_conv = None if out_channels is None else nn.Conv2d(features_decoder[-1], out_channels, 1)
super().__init__(
encoder=Encoder(
features=features_encoder,
scale_factors=scale_factors,
conv_block_impl=conv_block_impl,
pooler_impl=pooler_impl,
**conv_block_kwargs,
),
decoder=Decoder(
features=features_decoder,
scale_factors=scale_factors[::-1],
conv_block_impl=conv_block_impl,
sampler_impl=sampler_impl,
**conv_block_kwargs,
),
base=conv_block_impl(features_encoder[-1], features_encoder[-1] * gain, **conv_block_kwargs),
out_conv=out_conv,
final_activation=final_activation,
)
self.init_kwargs = {
"in_channels": in_channels,
"out_channels": out_channels,
"depth": depth,
"initial_features": initial_features,
"gain": gain,
"final_activation": final_activation,
"return_side_outputs": return_side_outputs,
"conv_block_impl": conv_block_impl,
"pooler_impl": pooler_impl,
"sampler_impl": sampler_impl,
**conv_block_kwargs,
}
```

- avg_line_length: 35.461988, max_line_length: 120, alphanum_fraction: 0.61222
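A minimal smoke-test sketch for the classes above (hyperparameters and tensor shapes are arbitrary): `forward()` takes one single-channel tensor per input channel, concatenates them along the channel axis, and returns one single-channel tensor per output channel.

```python
# Assumes MultiTensorUNet from the row above is in scope.
import torch

net = MultiTensorUNet(in_channels=2, out_channels=2, depth=3, initial_features=8)
x1 = torch.randn(1, 1, 64, 64)
x2 = torch.randn(1, 1, 64, 64)
outs = net(x1, x2)
print(len(outs), outs[0].shape)  # 2 torch.Size([1, 1, 64, 64])
```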
Row 7: `prog1/implementacoes/uri/1020.py` (ext py, lang Python, size 177, hexsha `fe87beef40fe004b6da1c4eacbff7e6cb77de080`)
- stars: repo `gabrielmbs/Tamburetei` @ `b7716d4e7683dc17831a574241bd26c0a67ca929`, licenses ["MIT"], count 209, events 2018-10-31T02:32:30.000Z to 2021-12-18T02:35:07.000Z
- issues: repo `gabrielmbs/Tamburetei` @ `b7716d4e7683dc17831a574241bd26c0a67ca929`, licenses ["MIT"], count 304, events 2018-10-31T02:16:26.000Z to 2021-12-20T19:41:27.000Z
- forks: repo `gabrielmbs/Tamburetei` @ `b7716d4e7683dc17831a574241bd26c0a67ca929`, licenses ["MIT"], count 205, events 2018-10-31T02:38:41.000Z to 2021-12-17T17:57:35.000Z
- content:

```python
n = int(input())
anos = n // 365
meses = (n % 365) // 30
dias = (n % 365)%30
print("{} ano(s)".format(anos))
print("{} mes(es)".format(meses))
print("{} dia(s)".format(dias))
```

- avg_line_length: 17.7, max_line_length: 33, alphanum_fraction: 0.548023
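A worked example of the conversion above, with an assumed input of 400 days:

```python
n = 400
anos = n // 365           # 1
meses = (n % 365) // 30   # 35 // 30 = 1
dias = (n % 365) % 30     # 35 % 30 = 5
print(anos, meses, dias)  # 1 ano(s), 1 mes(es), 5 dia(s)
```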
Row 8: `zonemanager/fc_san_lookup_service.py` (ext py, lang Python, size 3,579, hexsha `dd338d3ece200117940390fed497ffd2a5778d7a`)
- stars: repo `hybrid-storage-dev/cinder-fs-111t-hybrid-cherry` @ `86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1`, licenses ["Apache-2.0"], count null, events null
- issues: repo `hybrid-storage-dev/cinder-fs-111t-hybrid-cherry` @ `86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1`, licenses ["Apache-2.0"], count null, events null
- forks: repo `hybrid-storage-dev/cinder-fs-111t-hybrid-cherry` @ `86eb7e8b71c26bc39164fa18a9faa1065e4c1fc1`, licenses ["Apache-2.0"], count null, events null
- content:

```python
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base Lookup Service for name server lookup to find the initiator to target port
mapping for available SAN contexts.
Vendor specific lookup classes are expected to implement the interfaces
defined in this class.
"""
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
from cinder.zonemanager import fc_zone_manager
LOG = logging.getLogger(__name__)
class FCSanLookupService(fc_common.FCCommon):
"""Base Lookup Service.
Base Lookup Service for name server lookup to find the initiator to
target port mapping for available SAN contexts.
"""
lookup_service = None
def __init__(self, **kwargs):
super(FCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
opts = fc_zone_manager.zone_manager_opts
self.configuration = config.Configuration(opts, 'fc-zone-manager')
def get_device_mapping_from_network(self, initiator_list, target_list):
"""Get device mapping from FC network.
Gets a filtered list of initiator ports and target ports for each SAN
available.
:param initiator_list list of initiator port WWN
:param target_list list of target port WWN
:return device wwn map in following format
{
<San name>: {
'initiator_port_wwn_list':
('200000051E55A100', '200000051E55A121'..)
'target_port_wwn_list':
('100000051E55A100', '100000051E55A121'..)
}
}
:raise Exception when a lookup service implementation is not specified
in cinder.conf:fc_san_lookup_service
"""
# Initialize vendor specific implementation of FCZoneDriver
if (self.configuration.fc_san_lookup_service):
lookup_service = self.configuration.fc_san_lookup_service
LOG.debug("Lookup service to invoke: "
"%s", lookup_service)
self.lookup_service = importutils.import_object(
lookup_service, configuration=self.configuration)
else:
msg = _("Lookup service not configured. Config option for "
"fc_san_lookup_service need to specify a concrete "
"implementation of lookup service")
LOG.error(msg)
raise exception.FCSanLookupServiceException(msg)
try:
device_map = self.lookup_service.get_device_mapping_from_network(
initiator_list, target_list)
except Exception as e:
LOG.error(e)
raise exception.FCSanLookupServiceException(e)
return device_map
```

- avg_line_length: 37.673684, max_line_length: 79, alphanum_fraction: 0.678402
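The docstring of `get_device_mapping_from_network()` above fixes the shape of the device map a vendor-specific lookup service must return; the stub below sketches that contract (the class and fabric names are illustrative, not part of Cinder):

```python
# Hypothetical stand-in that satisfies the documented return format.
class FakeSanLookupService:
    def get_device_mapping_from_network(self, initiator_list, target_list):
        return {
            "fabric_a": {
                "initiator_port_wwn_list": list(initiator_list),
                "target_port_wwn_list": list(target_list),
            }
        }

lookup = FakeSanLookupService()
print(lookup.get_device_mapping_from_network(
    ["200000051E55A100"], ["100000051E55A100"]))
```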
Row 9: `tests/test_files/non_callable.py` (ext py, lang Python, size 10, hexsha `df56cfac960bab3f2e5fded1e8496af153d118e1`)
- stars: repo `Procrat/typy` @ `668cedb7f929256a09f565af9ee43c02889bec3f`, licenses ["MIT"], count 3, events 2016-03-08T09:55:20.000Z to 2016-09-09T12:54:12.000Z
- issues: repo `Procrat/typy` @ `668cedb7f929256a09f565af9ee43c02889bec3f`, licenses ["MIT"], count null, events null
- forks: repo `Procrat/typy` @ `668cedb7f929256a09f565af9ee43c02889bec3f`, licenses ["MIT"], count null, events null
- content:

```python
a = 5
a()
```

- avg_line_length: 3.333333, max_line_length: 5, alphanum_fraction: 0.3
Row 10: `nova/tests/pci/test_pci_request.py` (ext py, lang Python, size 7,452, hexsha `82568f8455c817522855c2e54acb5657cb0925a9`)
- stars: repo `vmthunder/nova` @ `baf05caab705c5778348d9f275dc541747b7c2de`, licenses ["Apache-2.0"], count null, events null
- issues: repo `vmthunder/nova` @ `baf05caab705c5778348d9f275dc541747b7c2de`, licenses ["Apache-2.0"], count null, events null
- forks: repo `vmthunder/nova` @ `baf05caab705c5778348d9f275dc541747b7c2de`, licenses ["Apache-2.0"], count null, events null
- content:

```python
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for PCI request."""
from nova import exception
from nova.pci import pci_request as pci_request
from nova import test
_fake_alias1 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
_fake_alias11 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
_fake_alias2 = """{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "1111",
"device_type": "N"
}"""
_fake_alias3 = """{
"name": "IntelNIC",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""
class AliasTestCase(test.NoDBTestCase):
def test_good_alias(self):
self.flags(pci_alias=[_fake_alias1])
als = pci_request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}
self.assertEqual(expect_dict, als['QuicAssist'][0])
def test_multispec_alias(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias11])
als = pci_request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict1 = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}
expect_dict2 = {
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "ACCEL"
}
self.assertEqual(expect_dict1, als['QuicAssist'][0])
self.assertEqual(expect_dict2, als['QuicAssist'][1])
def test_wrong_type_aliase(self):
self.flags(pci_alias=[_fake_alias2])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def test_wrong_product_id_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "g111",
"vendor_id": "1111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def test_wrong_vendor_id_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "0xg111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def test_wrong_cap_type_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "usb",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def test_dup_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}""",
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""])
self.assertRaises(
exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def _verify_result(self, expected, real):
exp_real = zip(expected, real)
for exp, real in exp_real:
self.assertEqual(exp['count'], real.count)
self.assertEqual(exp['alias_name'], real.alias_name)
self.assertEqual(exp['spec'], real.spec)
def test_aliase_2_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': 'ACCEL',
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
requests = pci_request._translate_alias_to_requests(
"QuicAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
def test_aliase_2_request_invalid(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
self.assertRaises(exception.PciRequestAliasNotDefined,
pci_request._translate_alias_to_requests,
"QuicAssistX : 3")
def test_get_pci_requests_from_flavor(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual(set([1, 3]),
set([p.count for p in requests.requests]))
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
flavor = {}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests.requests)
```

- avg_line_length: 35.485714, max_line_length: 78, alphanum_fraction: 0.527644
Row 11: `lib/galaxy/model/metadata.py` (ext py, lang Python, size 27,668, hexsha `1f085fc27e72eaef3e1e13b5f5e2ab659a78964d`)
- stars: repo `beatrizserrano/galaxy` @ `e149d9d32e1bca6c07c38b1a9cdabfee60323610`, licenses ["CC-BY-3.0"], count null, events null
- issues: repo `beatrizserrano/galaxy` @ `e149d9d32e1bca6c07c38b1a9cdabfee60323610`, licenses ["CC-BY-3.0"], count 2, events 2017-05-18T16:12:55.000Z to 2022-03-08T12:08:43.000Z
- forks: repo `beatrizserrano/galaxy` @ `e149d9d32e1bca6c07c38b1a9cdabfee60323610`, licenses ["CC-BY-3.0"], count null, events null
- content:

```python
"""
Galaxy Metadata
"""
import copy
import json
import logging
import os
import sys
import tempfile
import weakref
from collections import OrderedDict
from collections.abc import Mapping
from os.path import abspath
from typing import (
Any,
Iterator,
Optional,
TYPE_CHECKING,
Union,
)
from sqlalchemy.orm import object_session
from sqlalchemy.orm.attributes import flag_modified
import galaxy.model
from galaxy.model.scoped_session import galaxy_scoped_session
from galaxy.security.object_wrapper import sanitize_lists_to_string
from galaxy.util import (
form_builder,
listify,
string_as_bool,
stringify_dictionary_keys,
unicodify,
)
from galaxy.util.json import safe_dumps
if TYPE_CHECKING:
from galaxy.model import DatasetInstance
from galaxy.model.none_like import NoneDataset
from galaxy.model.store import SessionlessContext
log = logging.getLogger(__name__)
STATEMENTS = "__galaxy_statements__" # this is the name of the property in a Datatype class where new metadata spec element Statements are stored
class Statement:
"""
This class inserts its target into a list in the surrounding
class. the data.Data class has a metaclass which executes these
statements. This is how we shove the metadata element spec into
the class.
"""
def __init__(self, target):
self.target = target
def __call__(self, *args, **kwargs):
# get the locals dictionary of the frame object one down in the call stack (i.e. the Datatype class calling MetadataElement)
class_locals = sys._getframe(1).f_locals
# get and set '__galaxy_statements__' to an empty list if not in locals dict
statements = class_locals.setdefault(STATEMENTS, [])
# add Statement containing info to populate a MetadataElementSpec
statements.append((self, args, kwargs))
@classmethod
def process(cls, element):
for statement, args, kwargs in getattr(element, STATEMENTS, []):
statement.target(
element, *args, **kwargs
) # statement.target is MetadataElementSpec, element is a Datatype class
class MetadataCollection(Mapping):
"""
MetadataCollection is not a collection at all, but rather a proxy
to the real metadata which is stored as a Dictionary. This class
handles processing the metadata elements when they are set and
retrieved, returning default values in cases when metadata is not set.
"""
def __init__(
self,
parent: Union["DatasetInstance", "NoneDataset"],
session: Optional[Union[galaxy_scoped_session, "SessionlessContext"]] = None,
) -> None:
self.parent = parent
self._session = session
# initialize dict if needed
if self.parent._metadata is None:
self.parent._metadata = {}
def get_parent(self):
if "_parent" in self.__dict__:
return self.__dict__["_parent"]()
return None
def set_parent(self, parent):
# use weakref to prevent a circular reference interfering with garbage
# collection: hda/lda (parent) <--> MetadataCollection (self) ; needs to be
# hashable, so cannot use proxy.
self.__dict__["_parent"] = weakref.ref(parent)
parent = property(get_parent, set_parent)
@property
def spec(self):
return self.parent.datatype.metadata_spec
def _object_session(self, item):
return self._session if self._session else object_session(item)
def __iter__(self) -> Iterator[Any]:
yield from self.spec.keys()
def __getitem__(self, key):
try:
self.__getattribute__(key)
except AttributeError:
try:
return self.__getattr__(key)
except Exception:
raise KeyError
# `key` is an attribute of this instance, not some metadata: raise
# KeyError to prevent e.g. `'items' in dataset.metadata` from returning
# True
# Not doing this would also break Cheetah's NameMapper._valueForName()
# since dataset.metadata['items'] would be None
raise KeyError
def __len__(self):
return len(self.spec)
def __str__(self):
return dict(self.items()).__str__()
def __bool__(self):
return bool(self.parent._metadata)
__nonzero__ = __bool__
def __getattr__(self, name):
if name in self.spec:
if name in self.parent._metadata:
return self.spec[name].wrap(self.parent._metadata[name], self._object_session(self.parent))
return self.spec[name].wrap(self.spec[name].default, self._object_session(self.parent))
if name in self.parent._metadata:
return self.parent._metadata[name]
# Instead of raising an AttributeError for non-existing metadata, we return None
return None
def __setattr__(self, name, value):
if name == "parent":
return self.set_parent(value)
elif name == "_session":
super().__setattr__(name, value)
else:
if name in self.spec:
self.parent._metadata[name] = self.spec[name].unwrap(value)
else:
self.parent._metadata[name] = value
flag_modified(self.parent, "_metadata")
def remove_key(self, name):
if name in self.parent._metadata:
del self.parent._metadata[name]
else:
log.info(f"Attempted to delete invalid key '{name}' from MetadataCollection")
def element_is_set(self, name) -> bool:
"""
check if the meta data with the given name is set, i.e.
- if the such a metadata actually exists and
- if its value differs from no_value
:param name: the name of the metadata element
:returns: True if the value differes from the no_value
False if its equal of if no metadata with the name is specified
"""
meta_val = self[name]
try:
meta_spec = self.parent.metadata.spec[name]
except KeyError:
log.debug(f"No metadata element with name '{name}' found")
return False
return meta_val != meta_spec.no_value
def get_metadata_parameter(self, name, **kwd):
if name in self.spec:
field = self.spec[name].param.get_field(getattr(self, name), self, None, **kwd)
field.value = getattr(self, name)
return field
def make_dict_copy(self, to_copy):
"""Makes a deep copy of input iterable to_copy according to self.spec"""
rval = {}
for key, value in to_copy.items():
if key in self.spec:
rval[key] = self.spec[key].param.make_copy(value, target_context=self, source_context=to_copy)
return rval
@property
def requires_dataset_id(self):
for key in self.spec:
if isinstance(self.spec[key].param, FileParameter):
return True
return False
def from_JSON_dict(self, filename=None, path_rewriter=None, json_dict=None):
dataset = self.parent
if filename is not None:
log.debug(f"loading metadata from file for: {dataset.__class__.__name__} {dataset.id}")
with open(filename) as fh:
JSONified_dict = json.load(fh)
elif json_dict is not None:
log.debug(f"loading metadata from dict for: {dataset.__class__.__name__} {dataset.id}")
if isinstance(json_dict, str):
JSONified_dict = json.loads(json_dict)
elif isinstance(json_dict, dict):
JSONified_dict = json_dict
else:
raise ValueError(f"json_dict must be either a dictionary or a string, got {type(json_dict)}.")
else:
raise ValueError("You must provide either a filename or a json_dict")
# We build a dictionary for metadata name / value pairs
# because when we copy MetadataTempFile objects we flush the datasets'
# session, but only include the newly created MetadataFile object.
# If we were to set the metadata elements in the first for loop we'd
# lose all previously set metadata elements
metadata_name_value = {}
for name, spec in self.spec.items():
if name in JSONified_dict:
from_ext_kwds = {}
external_value = JSONified_dict[name]
param = spec.param
if isinstance(param, FileParameter):
from_ext_kwds["path_rewriter"] = path_rewriter
value = param.from_external_value(external_value, dataset, **from_ext_kwds)
metadata_name_value[name] = value
elif name in dataset._metadata:
# if the metadata value is not found in our externally set metadata but it has a value in the 'old'
# metadata associated with our dataset, we'll delete it from our dataset's metadata dict
del dataset._metadata[name]
for name, value in metadata_name_value.items():
dataset._metadata[name] = value
if "__extension__" in JSONified_dict:
dataset.extension = JSONified_dict["__extension__"]
if "__validated_state__" in JSONified_dict:
dataset.validated_state = JSONified_dict["__validated_state__"]
if "__validated_state_message__" in JSONified_dict:
dataset.validated_state_message = JSONified_dict["__validated_state_message__"]
flag_modified(dataset, "_metadata")
def to_JSON_dict(self, filename=None):
meta_dict = {}
dataset_meta_dict = self.parent._metadata
for name, spec in self.spec.items():
if name in dataset_meta_dict:
meta_dict[name] = spec.param.to_external_value(dataset_meta_dict[name])
if "__extension__" in dataset_meta_dict:
meta_dict["__extension__"] = dataset_meta_dict["__extension__"]
if "__validated_state__" in dataset_meta_dict:
meta_dict["__validated_state__"] = dataset_meta_dict["__validated_state__"]
if "__validated_state_message__" in dataset_meta_dict:
meta_dict["__validated_state_message__"] = dataset_meta_dict["__validated_state_message__"]
try:
encoded_meta_dict = galaxy.model.custom_types.json_encoder.encode(meta_dict)
except Exception as e:
raise Exception(f"Failed encoding metadata dictionary: {meta_dict}") from e
if filename is None:
return encoded_meta_dict
with open(filename, "wt+") as fh:
fh.write(encoded_meta_dict)
def __getstate__(self):
# cannot pickle a weakref item (self._parent), when
# data._metadata_collection is None, it will be recreated on demand
return None
class MetadataSpecCollection(OrderedDict):
"""
A simple extension of OrderedDict which allows cleaner access to items
and allows the values to be iterated over directly as if it were a
list. append() is also implemented for simplicity and does not
"append".
"""
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
def append(self, item):
self[item.name] = item
def __getattr__(self, name):
if name not in self:
raise AttributeError
return self.get(name)
def __repr__(self):
# force elements to draw with __str__ for sphinx-apidoc
return ", ".join(item.__str__() for item in self.values())
class MetadataParameter:
def __init__(self, spec):
self.spec = spec
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=value)
def to_string(self, value):
return str(value)
def to_safe_string(self, value):
return sanitize_lists_to_string(self.to_string(value))
def make_copy(self, value, target_context: MetadataCollection, source_context):
return copy.deepcopy(value)
@classmethod
def marshal(cls, value):
"""
This method should/can be overridden to convert the incoming
value to whatever type it is supposed to be.
"""
return value
def validate(self, value):
"""
Throw an exception if the value is invalid.
"""
def unwrap(self, form_value):
"""
Turns a value into its storable form.
"""
value = self.marshal(form_value)
self.validate(value)
return value
def wrap(self, value, session):
"""
Turns a value into its usable form.
"""
return value
def from_external_value(self, value, parent):
"""
Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
return value
class MetadataElementSpec:
"""
Defines a metadata element and adds it to the metadata_spec (which
is a MetadataSpecCollection) of datatype.
"""
def __init__(
self,
datatype,
name=None,
desc=None,
param=MetadataParameter,
default=None,
no_value=None,
visible=True,
set_in_upload=False,
**kwargs,
):
self.name = name
self.desc = desc or name
self.default = default
self.no_value = no_value
self.visible = visible
self.set_in_upload = set_in_upload
# Catch-all, allows for extra attributes to be set
self.__dict__.update(kwargs)
# set up param last, as it uses values set above
self.param = param(self)
# add spec element to the spec
datatype.metadata_spec.append(self)
# Should we validate that non-optional elements have been set ?
# (The answer is yes, but not all datatypes control optionality appropriately at this point.)
# This allows us to check that inherited MetadataElement instances from datatypes that set
# check_required_metadata have been reviewed and considered really required.
self.check_required_metadata = datatype.__dict__.get("check_required_metadata", False)
def get(self, name, default=None):
return self.__dict__.get(name, default)
def wrap(self, value, session):
"""
Turns a stored value into its usable form.
"""
return self.param.wrap(value, session)
def unwrap(self, value):
"""
Turns an incoming value into its storable form.
"""
return self.param.unwrap(value)
def __str__(self):
# TODO??: assuming param is the class of this MetadataElementSpec - add the plain class name for that
spec_dict = dict(param_class=self.param.__class__.__name__)
spec_dict.update(self.__dict__)
return "{name} ({param_class}): {desc}, defaults to '{default}'".format(**spec_dict)
# create a statement class that, when called,
# will add a new MetadataElementSpec to a class's metadata_spec
MetadataElement = Statement(MetadataElementSpec)
"""
MetadataParameter sub-classes.
"""
class SelectParameter(MetadataParameter):
def __init__(self, spec):
MetadataParameter.__init__(self, spec)
self.values = self.spec.get("values")
self.multiple = string_as_bool(self.spec.get("multiple"))
def to_string(self, value):
if value in [None, []]:
return str(self.spec.no_value)
if not isinstance(value, list):
value = [value]
return ",".join(map(str, value))
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
field = form_builder.SelectField(self.spec.name, multiple=self.multiple, display=self.spec.get("display"))
if self.values:
value_list = self.values
elif values:
value_list = values
elif value:
value_list = [(v, v) for v in listify(value)]
else:
value_list = []
for val, label in value_list:
try:
if (self.multiple and val in value) or (not self.multiple and val == value):
field.add_option(label, val, selected=True)
else:
field.add_option(label, val, selected=False)
except TypeError:
field.add_option(val, label, selected=False)
return field
def wrap(self, value, session):
# do we really need this (wasteful)? - yes because we are not sure that
# all existing selects have been stored previously as lists. Also this
# will handle the case where defaults/no_values are specified and are
# single non-list values.
value = self.marshal(value)
if self.multiple:
return value
elif value:
return value[0] # single select, only return the first value
return None
@classmethod
def marshal(cls, value):
# Store select as list, even if single item
if value is None:
return []
if not isinstance(value, list):
return [value]
return value
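# Editor's illustrative sketch (not part of the original module): marshal() always
# normalises to a list, and wrap() reverses that for single selects. Values shown
# are hypothetical:
#
#   SelectParameter.marshal(None)                 # -> []
#   SelectParameter.marshal("hg19")               # -> ["hg19"]
#   single_select.wrap(["hg19"], session=None)    # -> "hg19" (instance with multiple=False)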
class DBKeyParameter(SelectParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
try:
values = kwd["trans"].app.genome_builds.get_genome_build_names(kwd["trans"])
except KeyError:
pass
return super().get_field(value, context, other_values, values, **kwd)
class RangeParameter(SelectParameter):
def __init__(self, spec):
SelectParameter.__init__(self, spec)
# The spec must be set with min and max values
self.min = spec.get("min") or 1
self.max = spec.get("max") or 1
self.step = self.spec.get("step") or 1
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None:
values = list(zip(range(self.min, self.max, self.step), range(self.min, self.max, self.step)))
return SelectParameter.get_field(
self, value=value, context=context, other_values=other_values, values=values, **kwd
)
@classmethod
def marshal(cls, value):
value = SelectParameter.marshal(value)
values = [int(x) for x in value]
return values
class ColumnParameter(RangeParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None and context:
column_range = range(1, (context.columns or 0) + 1, 1)
values = list(zip(column_range, column_range))
return RangeParameter.get_field(
self, value=value, context=context, other_values=other_values, values=values, **kwd
)
class ColumnTypesParameter(MetadataParameter):
def to_string(self, value):
return ",".join(map(str, value))
class ListParameter(MetadataParameter):
def to_string(self, value):
return ",".join(str(x) for x in value)
class DictParameter(MetadataParameter):
def to_string(self, value):
return json.dumps(value)
def to_safe_string(self, value):
# We do not sanitize json dicts
return safe_dumps(value)
class PythonObjectParameter(MetadataParameter):
def to_string(self, value):
if not value:
return self.spec._to_string(self.spec.no_value)
return self.spec._to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=self._to_string(value))
@classmethod
def marshal(cls, value):
return value
class FileParameter(MetadataParameter):
def to_string(self, value):
if not value:
return str(self.spec.no_value)
return value.file_name
def to_safe_string(self, value):
# We do not sanitize file names
return self.to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=str(value.id))
def wrap(self, value, session):
if value is None:
return None
if isinstance(value, galaxy.model.MetadataFile) or isinstance(value, MetadataTempFile):
return value
if isinstance(value, int):
return session.query(galaxy.model.MetadataFile).get(value)
else:
return session.query(galaxy.model.MetadataFile).filter_by(uuid=value).one()
def make_copy(self, value, target_context: MetadataCollection, source_context):
session = target_context._object_session(target_context.parent)
value = self.wrap(value, session=session)
target_dataset = target_context.parent.dataset
if value and not value.id:
# This is a new MetadataFile object, we're not copying to another dataset.
# Just use it.
return self.unwrap(value)
if value and target_dataset.object_store.exists(target_dataset):
# Only copy MetadataFile if the target dataset has been created in an object store.
# All current datatypes re-generate MetadataFile objects when setting metadata,
# so this would ultimately get overwritten anyway.
new_value = galaxy.model.MetadataFile(dataset=target_context.parent, name=self.spec.name)
session.add(new_value)
try:
new_value.update_from_file(value.file_name)
except AssertionError:
session(target_context.parent).flush()
new_value.update_from_file(value.file_name)
return self.unwrap(new_value)
return None
@classmethod
def marshal(cls, value):
if isinstance(value, galaxy.model.MetadataFile):
# We want to push value.id to the database, but need to skip this when no session is available,
# as in extended_metadata mode, so there we just accept MetadataFile.
# We will only serialize MetadataFile in this mode and not push to the database, so this is OK.
value = value.id or value
if not isinstance(value, int) and object_session(value):
value = str(value.uuid)
return value
def from_external_value(self, value, parent, path_rewriter=None):
"""
        Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
if MetadataTempFile.is_JSONified_value(value):
value = MetadataTempFile.from_JSON(value)
if isinstance(value, MetadataTempFile):
mf = parent.metadata.get(self.spec.name, None)
if mf is None:
mf = self.new_file(dataset=parent, **value.kwds)
# Ensure the metadata file gets updated with content
file_name = value.file_name
if path_rewriter:
# Job may have run with a different (non-local) tmp/working
# directory. Correct.
file_name = path_rewriter(file_name)
mf.update_from_file(file_name)
os.unlink(file_name)
value = mf.id
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
if isinstance(value, galaxy.model.MetadataFile):
value = value.id
elif isinstance(value, MetadataTempFile):
value = MetadataTempFile.to_JSON(value)
return value
def new_file(self, dataset=None, **kwds):
# If there is a place to store the file (i.e. an object_store has been bound to
# Dataset) then use a MetadataFile and assume it is accessible. Otherwise use
# a MetadataTempFile.
if getattr(dataset.dataset, "object_store", False):
mf = galaxy.model.MetadataFile(name=self.spec.name, dataset=dataset, **kwds)
sa_session = object_session(dataset)
if sa_session:
sa_session.add(mf)
sa_session.flush() # flush to assign id
return mf
else:
            # we need to make a tmp file that is accessible to the head node,
            # we will be copying its contents into the MetadataFile object's file after restoring from JSON
# we do not include 'dataset' in the kwds passed, as from_JSON_value() will handle this for us
return MetadataTempFile(**kwds)
# This class is used when a database file connection is not available
class MetadataTempFile:
tmp_dir = "database/tmp" # this should be overwritten as necessary in calling scripts
def __init__(self, **kwds):
self.kwds = kwds
self._filename = None
@property
def file_name(self):
if self._filename is None:
            # we need to create a tmp file, accessible across all nodes/heads, save the name, and return it
self._filename = abspath(tempfile.NamedTemporaryFile(dir=self.tmp_dir, prefix="metadata_temp_file_").name)
open(self._filename, "wb+") # create an empty file, so it can't be reused using tempfile
return self._filename
def to_JSON(self):
return {"__class__": self.__class__.__name__, "filename": self.file_name, "kwds": self.kwds}
@classmethod
def from_JSON(cls, json_dict):
# need to ensure our keywords are not unicode
rval = cls(**stringify_dictionary_keys(json_dict["kwds"]))
rval._filename = json_dict["filename"]
return rval
@classmethod
def is_JSONified_value(cls, value):
return isinstance(value, dict) and value.get("__class__", None) == cls.__name__
@classmethod
def cleanup_from_JSON_dict_filename(cls, filename):
try:
with open(filename) as fh:
for value in json.load(fh).values():
if cls.is_JSONified_value(value):
value = cls.from_JSON(value)
if isinstance(value, cls) and os.path.exists(value.file_name):
log.debug("Cleaning up abandoned MetadataTempFile file: %s", value.file_name)
os.unlink(value.file_name)
except Exception as e:
log.debug("Failed to cleanup MetadataTempFile temp files from %s: %s", filename, unicodify(e))
__all__ = (
"Statement",
"MetadataElement",
"MetadataCollection",
"MetadataSpecCollection",
"MetadataParameter",
"MetadataElementSpec",
"SelectParameter",
"DBKeyParameter",
"RangeParameter",
"ColumnParameter",
"ColumnTypesParameter",
"ListParameter",
"DictParameter",
"PythonObjectParameter",
"FileParameter",
"MetadataTempFile",
)
| 37.088472
| 146
| 0.640704
|
37664b6492a954b48c08041f22d6d6fd31484c72
| 4,528
|
py
|
Python
|
7GDL_Blender.py
|
Lucas-idp/Vibracoes-Python
|
ad77535a067bc5e080e5a6c7e2b59dc2c521e249
|
[
"CC0-1.0"
] | 1
|
2021-04-18T15:05:55.000Z
|
2021-04-18T15:05:55.000Z
|
7GDL_Blender.py
|
Lucas-idp/Vibracoes7GDL-Python
|
ad77535a067bc5e080e5a6c7e2b59dc2c521e249
|
[
"CC0-1.0"
] | null | null | null |
7GDL_Blender.py
|
Lucas-idp/Vibracoes7GDL-Python
|
ad77535a067bc5e080e5a6c7e2b59dc2c521e249
|
[
"CC0-1.0"
] | null | null | null |
import bpy
import csv
import numpy as np
import time
import math
#### Importing CSV files and saving to response vectors, one for each degree of freedom
fpath = 'C:/7GDL_RespostaAnim2.csv'
csvFile = csv.reader(open(fpath))
data = [row for row in csvFile][0:]
data_7gdl = []
response = []
for i in range(len(data)):
data_list = data[i]
data_str = data_list[0]
data_float = [float(x) for x in data_str.split(' ')]
if i == 0:
gdls = len(data_float)
data_7gdl.append(data_float)
for i in range(gdls):
response_col = []
for j in range(len(data)):
data_point = data_7gdl[j][i]
response_col.append(data_point)
response.append(response_col)
resp_1 = response[0]
resp_2 = response[1]
resp_3 = response[2]
resp_4 = response[3]
resp_5 = response[4]
resp_6 = response[5]
resp_7 = response[6]
#### Location of the bodies in space
L1 = (3/5)*1.7
L2 = 1.7-L1
L3 = 1.4
#### Creating mesh bodies, one cylinder for each wheel and a cube for the chassis
op1 = bpy.ops.mesh.primitive_cylinder_add(radius= 0.25, depth=0.15, enter_editmode=False, align='CURSOR', location=(L1, L3/2, 0), rotation=(math.pi/2, 0, 0), scale=(1, 1, 1))
op2 = bpy.ops.mesh.primitive_cylinder_add(radius= 0.25, depth=0.15, enter_editmode=False, align='CURSOR', location=(L1, -L3/2, 0), rotation=(math.pi/2, 0, 0), scale=(1, 1, 1))
op3 = bpy.ops.mesh.primitive_cylinder_add(radius= 0.25, depth=0.15, enter_editmode=False, align='CURSOR', location=(-L2, -L3/2, 0), rotation=(math.pi/2, 0, 0), scale=(1, 1, 1))
op4 = bpy.ops.mesh.primitive_cylinder_add(radius= 0.25, depth=0.15, enter_editmode=False, align='CURSOR', location=(-L2, L3/2, 0), rotation=(math.pi/2, 0, 0), scale=(1, 1, 1))
op5 = bpy.ops.mesh.primitive_cube_add(size=0.5, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
#### Store the created objects in variables for use in the animation loop
cylinder1 = bpy.data.collections[0].objects['Cylinder']
cylinder2 = bpy.data.collections[0].objects['Cylinder.001']
cylinder3 = bpy.data.collections[0].objects['Cylinder.002']
cylinder4 = bpy.data.collections[0].objects['Cylinder.003']
cube5 = bpy.data.collections[0].objects['Cube']
frame_num = 0
#### Assemble the animation by keyframing each time step
for i in range(len(data)):
bpy.context.scene.frame_set(frame_num)
    cylinder1.location = (L1, L3/2,resp_1[i]) # Change Cylinder1's location according to its free-vibration response vector
cylinder1.keyframe_insert(data_path = "location", frame = frame_num) #Keyframe tracker for cylinder1's translation
    cylinder2.location = (L1, -L3/2,resp_2[i]) # Change Cylinder2's location according to its free-vibration response vector
cylinder2.keyframe_insert(data_path = "location", frame = frame_num) #Keyframe tracker for cylinder2's translation
    cylinder3.location = (-L2, -L3/2,resp_3[i]) # Change Cylinder3's location according to its free-vibration response vector
cylinder3.keyframe_insert(data_path = "location", frame = frame_num) #Keyframe tracker for cylinder3's translation
    cylinder4.location = (-L2, L3/2,resp_4[i]) # Change Cylinder4's location according to its free-vibration response vector
cylinder4.keyframe_insert(data_path = "location", frame = frame_num) #Keyframe tracker for cylinder4's translation
    cube5.location = (0,0,L1/3 + resp_5[i]) # Change the cube's location according to its free-vibration response vector
    # Change the cube's orientation (pitch and roll) according to its free-vibration response vectors
bpy.ops.transform.rotate(value=resp_6[i], orient_axis='Y', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False, release_confirm=True)
bpy.ops.transform.rotate(value=resp_7[i], orient_axis='X', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False, release_confirm=True)
    # Keyframe tracker for the cube's rotation
cube5.keyframe_insert(data_path = "rotation_euler", frame = frame_num)
    # Keyframe tracker for the cube's translation
cube5.keyframe_insert(data_path = "location", frame = frame_num)
frame_num += 5
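# Editor's note (not part of the original script): each row of the CSV read above
# is assumed to hold the 7 degree-of-freedom responses for one time step as a
# single space-separated string, e.g. (values hypothetical):
#
#   "0.012 0.009 0.011 0.010 0.005 0.001 0.002"
#
# The parsing loop splits that string into floats and regroups the data
# column-wise into one response vector per degree of freedom.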
| 56.6
| 352
| 0.715769
|
867cf5cd6a38fdbd42de7965e9b68a208de3c382
| 4,013
|
py
|
Python
|
hokonui/exchanges/gateio.py
|
laisee/hoko
|
657a56ccf3fa0fde783753f669b24086b263c370
|
[
"MIT"
] | 2
|
2015-11-14T12:22:26.000Z
|
2019-02-15T09:00:39.000Z
|
hokonui/exchanges/gateio.py
|
laisee/hoko
|
657a56ccf3fa0fde783753f669b24086b263c370
|
[
"MIT"
] | 4
|
2017-07-28T02:58:12.000Z
|
2021-08-01T16:17:20.000Z
|
hokonui/exchanges/gateio.py
|
laisee/hoko
|
657a56ccf3fa0fde783753f669b24086b263c370
|
[
"MIT"
] | 1
|
2021-08-18T14:13:01.000Z
|
2021-08-18T14:13:01.000Z
|
''' Module for Exchange base class '''
# pylint: disable=duplicate-code, line-too-long
import time
from hokonui.exchanges.base import Exchange as Base
from hokonui.models.ticker import Ticker
from hokonui.utils.helpers import apply_format
from hokonui.utils.helpers import apply_format_level
from hokonui.utils.helpers import get_response
class GateIo(Base):
    ''' Exchange class for the Gate.io market data API '''
ASK_URL = None
BID_URL = None
PRICE_URL = None
TICKER_URL = 'https://data.gateio.io/api2/1/ticker/btc_%s'
ORDER_BOOK_URL = 'https://data.gateio.io/api2/1/orderBook/btc_%s'
NAME = 'GateIo'
CCY_DEFAULT = 'USDT'
@classmethod
def _current_price_extractor(cls, data):
''' Method for extracting current price '''
return apply_format(data.get('last'))
@classmethod
def _current_bid_extractor(cls, data):
''' Method for extracting bid price '''
return apply_format(data.get('highestBid'))
@classmethod
def _current_ask_extractor(cls, data):
''' Method for extracting ask price '''
return apply_format(data.get('lowestAsk'))
@classmethod
def _current_orders_extractor(cls, data, max_qty=100):
''' Method for extracting orders '''
orders = {}
bids = {}
asks = {}
buymax = 0
sellmax = 0
for level in data["bids"]:
if buymax > max_qty:
pass
else:
asks[apply_format_level(level[0])] = "{:.8f}".format(
float(level[1]))
buymax = buymax + float(level[1])
for level in data["asks"]:
if sellmax > max_qty:
pass
else:
bids[apply_format_level(level[0])] = "{:.8f}".format(
float(level[1]))
sellmax = sellmax + float(level[1])
orders["source"] = cls.NAME
orders["bids"] = bids
orders["asks"] = asks
orders["timestamp"] = str(int(time.time()))
return orders
@classmethod
def _current_ticker_extractor(cls, data):
''' Method for extracting ticker '''
bid = apply_format(data.get('highestBid'))
ask = apply_format(data.get('lowestAsk'))
return Ticker(cls.CCY_DEFAULT, bid, ask).to_json()
@classmethod
def get_current_price(cls, ccy=None, params=None, body=None, header=None):
''' Method for retrieving last price '''
url = cls.PRICE_URL if hasattr(
cls, 'PRICE_URL') and cls.PRICE_URL is not None else cls.TICKER_URL
data = get_response(url, ccy, params, body, header)
return cls._current_price_extractor(data)
@classmethod
def get_current_bid(cls, ccy=None, params=None, body=None, header=None):
''' Method for retrieving current bid price '''
url = cls.BID_URL if hasattr(
cls, 'BID_URL') and cls.BID_URL is not None else cls.TICKER_URL
data = get_response(url, ccy, params, body, header)
return cls._current_bid_extractor(data)
@classmethod
def get_current_ask(cls, ccy=None, params=None, body=None, header=None):
''' Method for retrieving current ask price '''
url = cls.ASK_URL if hasattr(
cls, 'ASK_URL') and cls.ASK_URL is not None else cls.TICKER_URL
data = get_response(url, ccy, params, body, header)
return cls._current_ask_extractor(data)
@classmethod
def get_current_ticker(cls, ccy=None, params=None, body=None, header=None):
''' Method for retrieving current ticker '''
data = get_response(cls.TICKER_URL, ccy, params, body, header)
return cls._current_ticker_extractor(data)
@classmethod
def get_current_orders(cls, ccy=None, params=None, body=None, header=None):
''' Method for retrieving current orders '''
max_qty = 5
data = get_response(cls.ORDER_BOOK_URL, ccy, params, body)
return cls._current_orders_extractor(data, max_qty)
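# Editor's illustrative usage sketch (not part of the original module); the calls
# below use only the classmethods defined above, rely on the class default
# currency 'USDT', and would require network access to data.gateio.io:
#
#   GateIo.get_current_price()    # last traded BTC price as a formatted string
#   GateIo.get_current_ticker()   # bid/ask Ticker serialised to JSON
#   GateIo.get_current_orders()   # top-of-book bid/ask levels plus a timestamp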
| 35.830357
| 79
| 0.628956
|
27a2d622e6cd3c4ad6bf191b8645c4f193d777f4
| 3,317
|
py
|
Python
|
radiaTest-server/server/apps/user/routes.py
|
openeuler-mirror/radiaTest
|
4a067511d6ab69f76b8dc08667b8a1f8c1c73d23
|
[
"MulanPSL-1.0"
] | null | null | null |
radiaTest-server/server/apps/user/routes.py
|
openeuler-mirror/radiaTest
|
4a067511d6ab69f76b8dc08667b8a1f8c1c73d23
|
[
"MulanPSL-1.0"
] | 1
|
2022-03-23T06:53:25.000Z
|
2022-03-23T06:53:25.000Z
|
radiaTest-server/server/apps/user/routes.py
|
openeuler-mirror/radiaTest
|
4a067511d6ab69f76b8dc08667b8a1f8c1c73d23
|
[
"MulanPSL-1.0"
] | null | null | null |
from flask import Blueprint
from flask_restful import Resource
from flask_pydantic import validate
from server.utils.db import Select
from server.utils.cla_util import ClaSignSchema
from server.utils.auth_util import auth
from server.utils.response_util import response_collect
from server.schema.user import UpdateUserSchema, JoinGroupSchema, UserQuerySchema, UserTaskSchema, UserMachineSchema
from server.schema.user import GiteeLoginSchema, LoginSchema, UpdateUserSchema, JoinGroupSchema, UserQuerySchema
from server.schema.user import UserCaseCommitSchema
from .handlers import handler_gitee_callback
from .handlers import handler_gitee_login
from .handlers import handler_register
from .handlers import handler_update_user
from .handlers import handler_user_info
from .handlers import handler_logout
from .handlers import handler_select_default_org
from .handlers import handler_add_group
from .handlers import handler_get_all
from .handlers import handler_get_user_task
from .handlers import handler_get_user_machine
from .handlers import handler_login_callback
from .handlers import handler_get_user_case_commit
gitee = Blueprint('gitee', __name__)
@gitee.route("/api/v1/gitee/oauth/callback", methods=["GET"])
def gitee_callback():
return handler_gitee_callback()
class GiteeLogin(Resource):
@validate()
def get(self, query: GiteeLoginSchema):
return handler_gitee_login(query)
class Login(Resource):
@validate()
def get(self, query: LoginSchema):
return handler_login_callback(query)
class User(Resource):
@auth.login_required()
@response_collect
@validate()
def get(self, query: UserQuerySchema):
return handler_get_all(query)
class UserItem(Resource):
@validate()
def post(self, gitee_id, body: ClaSignSchema):
return handler_register(gitee_id, body)
@auth.login_required()
@response_collect
@validate()
def put(self, gitee_id, body: UpdateUserSchema):
return handler_update_user(gitee_id, body)
@auth.login_required()
@response_collect
def get(self, gitee_id):
return handler_user_info(gitee_id)
class Logout(Resource):
@auth.login_required()
@response_collect
def delete(self):
return handler_logout()
class Org(Resource):
@auth.login_required()
@response_collect
def put(self, org_id):
return handler_select_default_org(org_id)
class Group(Resource):
@auth.login_required()
@response_collect
@validate()
def put(self, group_id, body: JoinGroupSchema):
return handler_add_group(group_id, body)
# class Token(Resource):
# @validate()
# def put(self, body: RefreshTokenSchema):
# return handler_token(body.refresh_token)
class UserTask(Resource):
@auth.login_required()
@response_collect
@validate()
def get(self, query: UserTaskSchema):
return handler_get_user_task(query)
class UserMachine(Resource):
@auth.login_required()
@response_collect
@validate()
def get(self, query: UserMachineSchema):
return handler_get_user_machine(query)
class UserCaseCommit(Resource):
@auth.login_required()
@response_collect
@validate()
def get(self, query: UserCaseCommitSchema):
return handler_get_user_case_commit(query)
| 27.87395
| 116
| 0.755502
|
da966e0701960d2493b474cd6f9dbc6c02b4d264
| 2,612
|
py
|
Python
|
setup.py
|
123456789hong/Pyhyp
|
66000ed5ebb36cfbd2fb73945b0ba34d1d418525
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
123456789hong/Pyhyp
|
66000ed5ebb36cfbd2fb73945b0ba34d1d418525
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
123456789hong/Pyhyp
|
66000ed5ebb36cfbd2fb73945b0ba34d1d418525
|
[
"BSD-3-Clause"
] | null | null | null |
import io
import re
from setuptools import find_packages
from setuptools import setup
with io.open("READMEe.rst", "rt", encoding="utf8") as f:
readme = f.read()
with io.open("src/flask/__init__.py", "rt", encoding="utf8") as f:
version = re.search(r'__version__ = "(.*?)"', f.read()).group(1)
setup(
name="Flask",
version=version,
url="https://palletsprojects.com/p/flask/",
project_urls={
"Documentation": "https://flask.palletsprojects.com/",
"Code": "https://github.com/pallets/flask",
"Issue tracker": "https://github.com/pallets/flask/issues",
},
license="BSD-3-Clause",
author="Armin Ronacher",
author_email="armin.ronacher@active-4.com",
maintainer="Pallets",
maintainer_email="contact@palletsprojects.com",
description="A simple framework for building complex web applications.",
long_description=readme,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Flask",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
install_requires=[
"Werkzeug>=0.15",
"Jinja2>=2.10.1",
"itsdangerous>=0.24",
"click>=5.1",
],
extras_require={
"dotenv": ["python-dotenv"],
"dev": [
"pytest",
"coverage",
"tox",
"sphinx",
"pallets-sphinx-themes",
"sphinxcontrib-log-cabinet",
"sphinx-issues",
],
"docs": [
"sphinx",
"pallets-sphinx-themes",
"sphinxcontrib-log-cabinet",
"sphinx-issues",
],
},
entry_points={"console_scripts": ["flask = flask.cli:main"]},
)
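# Editor's note (not part of the original file): given the extras declared above,
# an editable install with the development dependencies would look roughly like:
#
#   pip install -e ".[dev]"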
| 33.487179
| 79
| 0.570827
|
aef20dfbfecbaa6b317e46e56de6b58bdd059724
| 2,505
|
py
|
Python
|
qiskit/extensions/qasm_simulator_cpp/snapshot.py
|
Shark-y/qiskit-sdk-py
|
c1361b823dc1a3fab76545e62975c2afb02e442d
|
[
"Apache-2.0"
] | null | null | null |
qiskit/extensions/qasm_simulator_cpp/snapshot.py
|
Shark-y/qiskit-sdk-py
|
c1361b823dc1a3fab76545e62975c2afb02e442d
|
[
"Apache-2.0"
] | 38
|
2017-08-04T09:57:36.000Z
|
2017-08-23T10:35:32.000Z
|
qiskit/extensions/qasm_simulator_cpp/snapshot.py
|
Shark-y/qiskit-sdk-py
|
c1361b823dc1a3fab76545e62975c2afb02e442d
|
[
"Apache-2.0"
] | 1
|
2017-08-18T08:22:50.000Z
|
2017-08-18T08:22:50.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
local_qiskit_simulator command to snapshot the quantum state.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.qasm import _node as node
class SnapshotGate(Gate):
"""Simulator snapshot operation."""
def __init__(self, m, qubit, circ=None):
"""Create new snapshot gate."""
super().__init__("snapshot", [m], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
m = self.param[0]
return self._qasmif("snapshot(%d) %s[%d];" % (m,
qubit[0].name,
qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.snapshot(self.param[0], self.arg[0]))
def snapshot(self, m, q):
"""Cache the quantum state of local_qiskit_simulator."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.snapshot(m, (q, j)))
return gs
self._check_qubit(q)
return self._attach(SnapshotGate(m, q, self))
# Add to QuantumCircuit and CompositeGate classes
QuantumCircuit.snapshot = snapshot
CompositeGate.snapshot = snapshot
# cache quantum state (identity)
QuantumCircuit.definitions["snapshot"] = {
"print": True,
"opaque": False,
"n_args": 1,
"n_bits": 1,
"args": ["m"],
"bits": ["a"],
# gate snapshot(m) a { }
"body": node.GateBody([])
}
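# Editor's illustrative usage sketch (not part of the original module); the
# circuit and register names are hypothetical:
#
#   # given an existing QuantumCircuit `circ` over register `q`:
#   circ.snapshot(0, q)       # cache snapshot slot 0 for every qubit in q
#   circ.snapshot(1, (q, 0))  # or for a single qubit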
| 30.925926
| 79
| 0.623553
|
5c1ca540141f9012050e57016992edb6225e7655
| 7,720
|
py
|
Python
|
gen/argo/events/client/models/v1alpha1_sensor.py
|
argoproj-labs/argo-events-client-python
|
3d6e3dffca4a12a490c2963f4ac90c8894948bb5
|
[
"Apache-2.0"
] | null | null | null |
gen/argo/events/client/models/v1alpha1_sensor.py
|
argoproj-labs/argo-events-client-python
|
3d6e3dffca4a12a490c2963f4ac90c8894948bb5
|
[
"Apache-2.0"
] | null | null | null |
gen/argo/events/client/models/v1alpha1_sensor.py
|
argoproj-labs/argo-events-client-python
|
3d6e3dffca4a12a490c2963f4ac90c8894948bb5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Argo Events
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.events.client.configuration import Configuration
class V1alpha1Sensor(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1alpha1SensorSpec',
'status': 'V1alpha1SensorStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1Sensor - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.metadata = metadata
self.spec = spec
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1alpha1Sensor. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1Sensor. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1Sensor.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1Sensor. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1Sensor. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1Sensor. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1Sensor.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1Sensor. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1Sensor. # noqa: E501
:return: The metadata of this V1alpha1Sensor. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1Sensor.
:param metadata: The metadata of this V1alpha1Sensor. # noqa: E501
:type: V1ObjectMeta
"""
if self.local_vars_configuration.client_side_validation and metadata is None: # noqa: E501
raise ValueError("Invalid value for `metadata`, must not be `None`") # noqa: E501
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha1Sensor. # noqa: E501
:return: The spec of this V1alpha1Sensor. # noqa: E501
:rtype: V1alpha1SensorSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha1Sensor.
:param spec: The spec of this V1alpha1Sensor. # noqa: E501
:type: V1alpha1SensorSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1alpha1Sensor. # noqa: E501
:return: The status of this V1alpha1Sensor. # noqa: E501
:rtype: V1alpha1SensorStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1alpha1Sensor.
:param status: The status of this V1alpha1Sensor. # noqa: E501
:type: V1alpha1SensorStatus
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1Sensor):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1Sensor):
return True
return self.to_dict() != other.to_dict()
| 33.275862
| 312
| 0.623057
|
03d0784791a801b30a620408aab1619c639ffa0c
| 2,625
|
py
|
Python
|
Scripts/ga/perc_050/dynamic_resources_exp4.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
Scripts/ga/perc_050/dynamic_resources_exp4.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
Scripts/ga/perc_050/dynamic_resources_exp4.py
|
radical-experiments/campaign_manager
|
337660cf07a97933b9b516d6612353bd3f6592a8
|
[
"MIT"
] | null | null | null |
from radical.cm.planner import GAPlanner
from random import gauss
import pandas as pd
import numpy as np
import sys
from time import time
def df_to_lists(cmp, size):
tmp_workflows = list()
tmp_numoper = list()
for i in range(size):
point = cmp.loc[i]
workflow = {'description': None}
workflow['id'] = int(point['id'])
workflow['num_oper'] = point['num_oper']
tmp_workflows.append(workflow)
tmp_numoper.append(workflow['num_oper'])
return tmp_workflows, tmp_numoper
def resdf_to_dict(res_df, size):
tmp_resources = list()
for i in range(size):
point = res_df.loc[i]
tmp_res = {'id': int(point['id']),
'performance': 1.0}
tmp_resources.append(tmp_res)
return tmp_resources
def get_makespan(curr_plan, dyn_resources):
'''
Calculate makespan
'''
resource_usage = [0] * len(dyn_resources)
tmp_idx = [0] * len(dyn_resources)
for placement in curr_plan:
workflow = placement[0]
resource_id = placement[1]['id']
#resource_usage[resource_id - 1] += workflow['num_oper'] / gauss(1, 4900 / 76000)
resource_usage[resource_id - 1] += workflow['num_oper'] / \
dyn_resources[resource_id - 1,
tmp_idx[resource_id - 1]]
tmp_idx[resource_id - 1] += 1
return max(resource_usage)
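# Editor's note (not part of the original script): the makespan computed above is,
# in effect,
#
#   makespan = max over resources r of  sum_i num_oper(w_i) / performance_r(i)
#
# where the sum runs over the workflows placed on resource r and performance_r(i)
# is the dynamic performance sample used for the i-th workflow assigned to r.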
if __name__ == "__main__":
repetitions = int(sys.argv[1])
dyn_resources = np.load('../../../Data/homogeneous_resources_dyn.npy')
total_resources = pd.read_csv('../../../Data/heterogeneous_resources.csv')
total_cmp = pd.read_csv('../../../Data/heterogeneous_campaign.csv')
num_resources = [4, 8, 16, 32, 64, 128]
results = pd.DataFrame(columns=['size','planner','plan','makespan','time'])
campaign, num_oper = df_to_lists(cmp=total_cmp, size=1024)
for res_num in num_resources:
print('Number of resources: %d' % res_num)
resources = resdf_to_dict(res_df=total_resources, size=res_num)
for _ in range(repetitions):
planner = GAPlanner(campaign=campaign, resources=resources, num_oper=num_oper, sid='test1', random_init=0.50)
tic = time()
plan = planner.plan()
toc = time()
makespan = get_makespan(plan, dyn_resources[0:res_num,:])
results.loc[len(results)]= [res_num, 'GA-50', plan, makespan, toc - tic]
del planner
results.to_csv('../../../Data/ga/perc_050/DynFixedHomoResources_StHeteroCampaignsGA50.csv', index=False)
| 35
| 121
| 0.616
|
d1ce4d64a1d8c2bf4cdee7e685a19358df002bea
| 8,939
|
py
|
Python
|
kats/tests/models/test_theta_model.py
|
prsalm/Kats
|
786a2d81193fda2ffc6d90d0e8b4d52600ce8ee7
|
[
"MIT"
] | 3,580
|
2021-06-21T03:55:17.000Z
|
2022-03-31T20:21:38.000Z
|
kats/tests/models/test_theta_model.py
|
leofionn/Kats
|
4f8599c538a8203358bd987f92ab69c74635fb58
|
[
"MIT"
] | 164
|
2021-06-22T03:00:32.000Z
|
2022-03-31T22:08:16.000Z
|
kats/tests/models/test_theta_model.py
|
leofionn/Kats
|
4f8599c538a8203358bd987f92ab69c74635fb58
|
[
"MIT"
] | 350
|
2021-06-21T19:53:47.000Z
|
2022-03-30T08:07:03.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import re
import unittest
from typing import Optional
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.data.utils import load_data, load_air_passengers
from kats.models.theta import ThetaModel, ThetaParams
from kats.tests.models.test_models_dummy_data import (
NONSEASONAL_INPUT,
AIR_FCST_15_THETA_SM_11,
AIR_FCST_15_THETA_INCL_HIST_SM_11,
PEYTON_FCST_30_THETA_SM_11,
PEYTON_FCST_30_THETA_INCL_HIST_SM_11,
AIR_FCST_15_THETA_SM_12,
AIR_FCST_15_THETA_INCL_HIST_SM_12,
PEYTON_FCST_30_THETA_SM_12,
PEYTON_FCST_30_THETA_INCL_HIST_SM_12,
)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from parameterized.parameterized import parameterized
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
TEST_DATA = {
"short": {
"ts": TimeSeriesData(
pd.DataFrame(
{
"time": [
pd.Timestamp("1961-01-01 00:00:00"),
pd.Timestamp("1961-02-01 00:00:00"),
],
"y": [1.0, 2.0],
}
)
),
"params": ThetaParams(m=2),
},
"constant": {
"ts": TimeSeriesData(
pd.DataFrame(
{"time": pd.date_range("1960-12-01", "1963-01-01", freq="m"), "y": 10.0}
)
),
"params": ThetaParams(m=2),
},
"nonseasonal": {
"ts": TimeSeriesData(NONSEASONAL_INPUT),
"params": ThetaParams(m=4),
},
"daily": {
"ts": TimeSeriesData(
load_data("peyton_manning.csv").set_axis(["time", "y"], axis=1)
),
"params": ThetaParams(),
"params_negative": ThetaParams(m=-5),
},
"monthly": {
"ts": load_air_passengers(),
"params": ThetaParams(m=12),
},
"multivariate": {
"ts": TimeSeriesData(load_data("multivariate_anomaly_simulated_data.csv"))
},
}
class ThetaModelTest(TestCase):
def test_params(self) -> None:
# Test default value
params = ThetaParams()
params.validate_params()
self.assertEqual(params.m, 1)
params = ThetaParams(m=12)
self.assertEqual(params.m, 12)
@parameterized.expand(
[
[
"monthly",
TEST_DATA["monthly"]["ts"],
TEST_DATA["monthly"]["params"],
15,
0.05,
False,
None,
(
AIR_FCST_15_THETA_SM_11
if statsmodels_ver < 0.12
else AIR_FCST_15_THETA_SM_12
),
],
[
"monthly, include history",
TEST_DATA["monthly"]["ts"],
TEST_DATA["monthly"]["params"],
15,
0.05,
True,
None,
(
AIR_FCST_15_THETA_INCL_HIST_SM_11
if statsmodels_ver < 0.12
else AIR_FCST_15_THETA_INCL_HIST_SM_12
),
],
[
"daily",
TEST_DATA["daily"]["ts"],
TEST_DATA["daily"]["params"],
30,
0.05,
False,
None,
(
PEYTON_FCST_30_THETA_SM_11
if statsmodels_ver < 0.12
else PEYTON_FCST_30_THETA_SM_12
),
],
[
"daily, include history",
TEST_DATA["daily"]["ts"],
TEST_DATA["daily"]["params_negative"],
30,
0.05,
True,
None,
(
PEYTON_FCST_30_THETA_INCL_HIST_SM_11
if statsmodels_ver < 0.12
else PEYTON_FCST_30_THETA_INCL_HIST_SM_12
),
],
]
)
def test_forecast(
self,
testcase_name: str,
ts: TimeSeriesData,
params: ThetaParams,
steps: int,
alpha: float,
include_history: bool,
freq: Optional[str],
truth: pd.DataFrame,
) -> None:
np.random.seed(0)
m = ThetaModel(data=ts, params=params)
m.fit()
forecast_df = m.predict(
steps=steps, alpha=alpha, include_history=include_history, freq=freq
)
assert_frame_equal(forecast_df, truth, check_exact=False)
@parameterized.expand(
[
[
"m less than 1",
TEST_DATA["daily"]["ts"],
TEST_DATA["daily"]["params_negative"],
False,
],
[
"data too short",
TEST_DATA["short"]["ts"],
TEST_DATA["short"]["params"],
False,
],
[
"constant data",
TEST_DATA["constant"]["ts"],
TEST_DATA["constant"]["params"],
False,
],
[
"seasonal",
TEST_DATA["monthly"]["ts"],
TEST_DATA["monthly"]["params"],
True,
],
]
)
def test_check_seasonality(
self,
testcase_name: str,
ts: TimeSeriesData,
params: ThetaParams,
is_seasonal: bool,
) -> None:
m = ThetaModel(ts, params)
m.check_seasonality()
self.assertEqual(m.seasonal, is_seasonal)
@parameterized.expand(
[
[
"nonseasonal",
False,
TEST_DATA["nonseasonal"]["ts"],
TEST_DATA["nonseasonal"]["params"],
False,
True,
],
[
"seasonal",
True,
TEST_DATA["monthly"]["ts"],
TEST_DATA["monthly"]["params"],
True,
False,
],
]
)
def test_deseasonalize(
self,
testcase_name: str,
seasonal: bool,
ts: TimeSeriesData,
params: ThetaParams,
seasonality_removed: bool,
decomp_is_none: bool,
) -> None:
m = ThetaModel(ts, params)
m.seasonal = seasonal
deseas_data = m.deseasonalize()
if seasonality_removed:
self.assertFalse(ts.value.equals(deseas_data.value))
else:
assert_series_equal(deseas_data.value, ts.value)
self.assertEqual(decomp_is_none, m.decomp is None)
def test_multivar(self) -> None:
# Theta model does not support multivariate time data
self.assertRaises(
ValueError,
ThetaModel,
TEST_DATA["multivariate"]["ts"],
ThetaParams(),
)
def test_exec_plot(self):
m = ThetaModel(TEST_DATA["daily"]["ts"], TEST_DATA["daily"]["params"])
m.fit()
m.predict(steps=15, alpha=0.05)
m.plot()
def test_name(self):
m = ThetaModel(TEST_DATA["daily"]["ts"], TEST_DATA["daily"]["params"])
self.assertEqual(m.__str__(), "Theta")
def test_search_space(self):
self.assertEqual(
ThetaModel.get_parameter_search_space(),
[
{
"name": "m",
"type": "choice",
"values": list(range(1, 31)),
"value_type": "int",
"is_ordered": True,
},
],
)
def test_others(self) -> None:
m = ThetaModel(TEST_DATA["daily"]["ts"], TEST_DATA["daily"]["params"])
# fit must be called before predict
self.assertRaises(ValueError, m.predict, 30)
# seasonal data must be deseasonalized before fit
with patch.object(
m, "deseasonalize", (lambda self: self.data).__get__(m)
), patch.object(m, "check_seasonality"):
m.n = None
m.seasonal = True
m.decomp = None
self.assertRaises(ValueError, m.fit)
with patch(
"kats.utils.decomposition.TimeSeriesDecomposition.decomposer",
return_value={"seasonal": TEST_DATA["daily"]["ts"] * 0},
):
# Don't deseasonalize if any seasonal index = 0
deseas_data = m.deseasonalize()
assert_series_equal(deseas_data.value, TEST_DATA["daily"]["ts"].value)
if __name__ == "__main__":
unittest.main()
| 28.928803
| 88
| 0.497035
|
d7767020504d6b37195782efe6cea9ae2c6870c0
| 1,302
|
py
|
Python
|
trel/migrations/0002_complaint.py
|
alsoncahyadi/julid-be
|
9bb63882cab637329b85d35da874ea1c03180cdd
|
[
"MIT"
] | null | null | null |
trel/migrations/0002_complaint.py
|
alsoncahyadi/julid-be
|
9bb63882cab637329b85d35da874ea1c03180cdd
|
[
"MIT"
] | null | null | null |
trel/migrations/0002_complaint.py
|
alsoncahyadi/julid-be
|
9bb63882cab637329b85d35da874ea1c03180cdd
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-02-14 09:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trel', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Complaint',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='Complaint ID')),
('text', models.TextField(verbose_name='Complaint Text')),
('state', models.IntegerField(verbose_name='State')),
('category', models.CharField(max_length=10, verbose_name='Category')),
('post_id', models.CharField(max_length=35, verbose_name='Instagram Post ID')),
                ('comment_id', models.CharField(max_length=20, verbose_name='Instagram Comment ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('ready_at', models.DateTimeField()),
('wip_at', models.DateTimeField()),
('resolved_at', models.DateTimeField()),
],
options={
'verbose_name': 'Complaint',
'verbose_name_plural': 'Complaints',
},
),
]
| 38.294118
| 105
| 0.566052
|
f13a861be96cbaaf2871c8a8e46cd1a5fde5b80c
| 10,069
|
py
|
Python
|
scripts/listpages.py
|
zkhalido/pywikibotGerritMirror
|
b7775cb150c88e05fbfe66f2a90ace5bd08a7344
|
[
"MIT"
] | 1
|
2019-10-02T19:25:38.000Z
|
2019-10-02T19:25:38.000Z
|
scripts/listpages.py
|
mridullpandey/pywikibot
|
6c9280bea615ad457fd79c01bf1d2cab4c6bf220
|
[
"MIT"
] | 2
|
2019-11-07T13:46:32.000Z
|
2019-11-07T14:20:53.000Z
|
scripts/listpages.py
|
mridullpandey/pywikibot
|
6c9280bea615ad457fd79c01bf1d2cab4c6bf220
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Print a list of pages, as defined by page generator parameters.
Optionally, it also prints page content to STDOUT or save it to a file
in the current directory.
These parameters are supported to specify which page titles to print:
-format Defines the output format.
Can be a custom string according to python string.format() notation
or can be selected by a number from following list
(1 is default format):
1 - '{num:4d} {page.title}'
--> 10 PageTitle
2 - '{num:4d} [[{page.title}]]'
--> 10 [[PageTitle]]
3 - '{page.title}'
--> PageTitle
4 - '[[{page.title}]]'
--> [[PageTitle]]
5 - '{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}'
--> 10 localised_Namespace:PageTitle (colorised in lightred)
6 - '{num:4d} {page.loc_title:<40} {page.can_title:<40}'
--> 10 localised_Namespace:PageTitle
canonical_Namespace:PageTitle
7 - '{num:4d} {page.loc_title:<40} {page.trs_title:<40}'
--> 10 localised_Namespace:PageTitle
outputlang_Namespace:PageTitle
(*) requires "outputlang:lang" set.
num is the sequential number of the listed page.
An empty format is equal to -notitle and just shows the total
amount of pages.
-outputlang Language for translation of namespaces.
-notitle Page title is not printed.
-get Page content is printed.
-save Save Page content to a file named as page.title(as_filename=True).
Directory can be set with -save:dir_name
If no dir is specified, current directory will be used.
-encode File encoding can be specified with '-encode:name' (name must be
a valid python encoding: utf-8, etc.).
If not specified, it defaults to config.textfile_encoding.
-put: Save the list to the defined page of the wiki. By default it does
not overwrite an existing page.
-overwrite Overwrite the page if it exists. Can only by applied with -put.
-summary: The summary text when the page is written. If it's one word just
containing letters, dashes and underscores it uses that as a
translation key.
Custom format can be applied to the following items extrapolated from a
page object:
site: obtained from page._link._site.
title: obtained from page._link._title.
loc_title: obtained from page._link.canonical_title().
can_title: obtained from page._link.ns_title().
     based either on the canonical namespace name or on the namespace name
     in the language specified by the -outputlang param;
a default value '******' will be used if no ns is found.
onsite: obtained from pywikibot.Site(outputlang, self.site.family).
trs_title: obtained from page._link.ns_title(onsite=onsite).
If selected format requires trs_title, outputlang must be set.
¶ms;
"""
#
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import os
import re
import pywikibot
from pywikibot import config2 as config, i18n
from pywikibot.pagegenerators import GeneratorFactory, parameterHelp
docuReplacements = {'¶ms;': parameterHelp} # noqa: N816
class Formatter(object):
"""Structure with Page attributes exposed for formatting from cmd line."""
fmt_options = {
'1': '{num:4d} {page.title}',
'2': '{num:4d} [[{page.title}]]',
'3': '{page.title}',
'4': '[[{page.title}]]',
'5': '{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}',
'6': '{num:4d} {page.loc_title:<40} {page.can_title:<40}',
'7': '{num:4d} {page.loc_title:<40} {page.trs_title:<40}',
}
# Identify which formats need outputlang
fmt_need_lang = [k for k, v in fmt_options.items() if 'trs_title' in v]
def __init__(self, page, outputlang=None, default='******'):
"""
Initializer.
@param page: the page to be formatted.
@type page: Page object.
@param outputlang: language code in which namespace before title should
be translated.
Page ns will be searched in Site(outputlang, page.site.family)
and, if found, its custom name will be used in page.title().
@type outputlang: str or None, if no translation is wanted.
@param default: default string to be used if no corresponding
namespace is found when outputlang is not None.
"""
self.site = page._link.site
self.title = page._link.title
self.loc_title = page._link.canonical_title()
self.can_title = page._link.ns_title()
self.outputlang = outputlang
if outputlang is not None:
# Cache onsite in case of translations.
if not hasattr(self, 'onsite'):
self.onsite = pywikibot.Site(outputlang, self.site.family)
try:
self.trs_title = page._link.ns_title(onsite=self.onsite)
# Fallback if no corresponding namespace is found in onsite.
except pywikibot.Error:
self.trs_title = '{0}:{1}'.format(default, page._link.title)
def output(self, num=None, fmt=1):
"""Output formatted string."""
fmt = self.fmt_options.get(fmt, fmt)
# If selected format requires trs_title, outputlang must be set.
if (fmt in self.fmt_need_lang
or 'trs_title' in fmt
and self.outputlang is None):
raise ValueError(
"Required format code needs 'outputlang' parameter set.")
if num is None:
return fmt.format(page=self)
else:
return fmt.format(num=num, page=self)
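# Editor's illustrative sketch (not part of the original script); the page title
# is hypothetical:
#
#   page = pywikibot.Page(pywikibot.Site(), 'Example')
#   Formatter(page).output(num=1, fmt='2')   # -> '   1 [[Example]]'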
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
notitle = False
fmt = '1'
outputlang = None
page_get = False
base_dir = None
encoding = config.textfile_encoding
page_target = None
overwrite = False
summary = 'listpages-save-list'
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
gen_factory = GeneratorFactory()
for arg in local_args:
option, sep, value = arg.partition(':')
if option == '-notitle':
notitle = True
elif option == '-format':
fmt = value.replace('\\03{{', '\03{{')
if not fmt.strip():
notitle = True
elif option == '-outputlang':
outputlang = value
elif option == '-get':
page_get = True
elif option == '-save':
base_dir = value or '.'
elif option == '-encode':
encoding = value
elif option == '-put':
page_target = value
elif option == '-overwrite':
overwrite = True
elif option == '-summary':
summary = value
else:
gen_factory.handleArg(arg)
if base_dir:
base_dir = os.path.expanduser(base_dir)
if not os.path.isabs(base_dir):
base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))
if not os.path.exists(base_dir):
pywikibot.output('Directory "{0}" does not exist.'
.format(base_dir))
choice = pywikibot.input_yn(
'Do you want to create it ("No" to continue without saving)?')
if choice:
os.makedirs(base_dir, mode=0o744)
else:
base_dir = None
elif not os.path.isdir(base_dir):
# base_dir is a file.
pywikibot.warning('Not a directory: "{0}"\n'
'Skipping saving ...'
.format(base_dir))
base_dir = None
if page_target:
site = pywikibot.Site()
page_target = pywikibot.Page(site, page_target)
if not overwrite and page_target.exists():
pywikibot.bot.suggest_help(
additional_text='Page {0} already exists.\n'
'You can use the -overwrite argument to '
'replace the content of this page.'
.format(page_target.title(as_link=True)))
return False
if re.match('[a-z_-]+$', summary):
summary = i18n.twtranslate(site, summary)
gen = gen_factory.getCombinedGenerator()
if gen:
i = 0
output_list = []
for i, page in enumerate(gen, start=1):
if not notitle:
page_fmt = Formatter(page, outputlang)
output_list += [page_fmt.output(num=i, fmt=fmt)]
if page_get:
if output_list:
pywikibot.stdout(output_list.pop(-1))
try:
pywikibot.stdout(page.text)
except pywikibot.Error as err:
pywikibot.output(err)
if base_dir:
filename = os.path.join(base_dir, page.title(as_filename=True))
pywikibot.output('Saving {0} to {1}'
.format(page.title(), filename))
with open(filename, mode='wb') as f:
f.write(page.text.encode(encoding))
text = '\n'.join(output_list)
if page_target:
page_target.text = text
page_target.save(summary=summary)
pywikibot.stdout(text)
pywikibot.output('{0} page(s) found'.format(i))
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == '__main__':
main()
| 34.961806
| 79
| 0.582282
|
54e8eefedfd020857203a9e58a1318df9611c157
| 2,216
|
py
|
Python
|
chatbot.py
|
Soosai007/Python-projects-
|
3cd61e7019a77ccf46a7805827bef2ac6371af3a
|
[
"MIT"
] | null | null | null |
chatbot.py
|
Soosai007/Python-projects-
|
3cd61e7019a77ccf46a7805827bef2ac6371af3a
|
[
"MIT"
] | null | null | null |
chatbot.py
|
Soosai007/Python-projects-
|
3cd61e7019a77ccf46a7805827bef2ac6371af3a
|
[
"MIT"
] | null | null | null |
from nltk.chat.util import Chat, reflections
pairs = [
[
r"my name is (.*)",
["Hello %1, How are you today ?",]
],
[
r"what is your name ?",
["My name is Chatty and I'm a chatbot ?",]
],
[
r"how are you ?",
["I'm doing good\nHow about You ?",]
],
[
r"sorry (.*)",
["Its alright","Its OK, never mind",]
],
[
r"i'm (.*) doing good",
["Nice to hear that","Alright :)",]
],
[
r"hi|hey|hello",
["Hello", "Hey there",]
],
[
r"(.*) age?",
["I'm a computer program dude\nSeriously you are asking me this?",]
],
[
r"what (.*) want ?",
["Make me an offer I can't refuse",]
],
[
r"(.*) created ?",
["Subash created me using Python's NLTK library ","top secret ;)",]
],
[
r"(.*) (location|city) ?",
['Chennai, Tamil Nadu',]
],
[
r"how is weather in (.*)?",
["Weather in %1 is awesome like always","Too hot man here in %1","Too cold man here in %1","Never even heard about %1"]
],
[
r"i work in (.*)?",
["%1 is an Amazing company, I have heard about it.",]
],
[
r"(.*)raining in (.*)",
["No rain since last week here in %2","Damn its raining too much here in %2"]
],
[
r"how (.*) health(.*)",
["I'm a computer program, so I'm always healthy ",]
],
[
r"(.*) (sports|game) ?",
["I'm a very big fan of Football",]
],
[
r"who (.*) sportsperson ?",
["Messy","Ronaldo","Roony"]
],
[
r"who (.*) (moviestar|actor)?",
["Brad Pitt"]
],
[
r"quit",
["BBye take care. See you soon :) ","It was nice talking to you. See you soon :)"]
],
]
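# Editor's note (not part of the original script): each entry in `pairs` is a
# [regex, [responses, ...]] pair; "%1" in a response is replaced with the text
# captured by the first regex group (after applying `reflections`), so the
# hypothetical input "my name is Sam" yields "Hello Sam, How are you today ?".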
def chatty():
print("Hi, I'm Chatty and I chat alot ;)\nPlease type lowercase English language to start a conversation. Type quit to leave ") #default message at the start
chat = Chat(pairs, reflections)
chat.converse()
if __name__ == "__main__":
chatty()
| 26.070588
| 166
| 0.449007
|
87b60e49e93365ad32b68fdf91a2d44478595b14
| 4,310
|
py
|
Python
|
tests/test_utils.py
|
EthanCarragher/anesthetic
|
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
EthanCarragher/anesthetic
|
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
EthanCarragher/anesthetic
|
b577d4ca415292e8875e2afc3a9a97d6b1a4b931
|
[
"MIT"
] | null | null | null |
import warnings
import numpy as np
from scipy import special as sp
from numpy.testing import assert_array_equal
from anesthetic import NestedSamples
from anesthetic.utils import (nest_level, compute_nlive, unique, is_int,
logsumexp, sample_compression_1d,
triangular_sample_compression_2d,
insertion_p_value)
def test_nest_level():
assert(nest_level(0) == 0)
assert(nest_level([]) == 1)
assert(nest_level(['a']) == 1)
assert(nest_level(['a', 'b']) == 1)
assert(nest_level([['a'], 'b']) == 2)
assert(nest_level(['a', ['b']]) == 2)
assert(nest_level([['a'], ['b']]) == 2)
def test_compute_nlive():
# Generate a 'pure' nested sampling run
np.random.seed(0)
nlive = 500
ncompress = 100
logL = np.cumsum(np.random.rand(nlive, ncompress), axis=1)
logL_birth = np.concatenate((np.ones((nlive, 1))*-1e30, logL[:, :-1]),
axis=1)
i = np.argsort(logL.flatten())
logL = logL.flatten()[i]
logL_birth = logL_birth.flatten()[i]
# Compute nlive
nlives = compute_nlive(logL, logL_birth)
# Check the first half are constant
assert_array_equal(nlives[:len(nlives)//2], nlive)
# Check one point at the end
assert(nlives[-1] == 1)
# Check never more than nlive
assert(nlives.max() <= nlive)
def test_unique():
assert(unique([3, 2, 1, 4, 1, 3]) == [3, 2, 1, 4])
def test_triangular_sample_compression_2d():
np.random.seed(0)
n = 5000
x = np.random.rand(n)
y = np.random.rand(n)
w = np.random.rand(n)
cov = np.identity(2)
tri, W = triangular_sample_compression_2d(x, y, cov, w)
assert len(W) == 1000
assert np.isclose(sum(W), sum(w), rtol=1e-1)
def test_sample_compression_1d():
np.random.seed(0)
N = 10000
x_ = np.random.rand(N)
w_ = np.random.rand(N)
n = 1000
x, w = sample_compression_1d(x_, w_, n)
assert len(x) == n
assert len(w) == n
assert np.isclose(w.sum(), w_.sum())
def test_is_int():
assert is_int(1)
assert is_int(np.int64(1))
assert not is_int(1.)
assert not is_int(np.float64(1.))
def test_logsumexpinf():
np.random.seed(0)
a = np.random.rand(10)
b = np.random.rand(10)
assert logsumexp(-np.inf, b=[-np.inf]) == -np.inf
assert logsumexp(a, b=b) == sp.logsumexp(a, b=b)
a[0] = -np.inf
assert logsumexp(a, b=b) == sp.logsumexp(a, b=b)
b[0] = -np.inf
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'invalid value encountered in multiply',
RuntimeWarning)
assert np.isnan(sp.logsumexp(a, b=b))
assert np.isfinite(logsumexp(a, b=b))
def test_insertion_p_value():
np.random.seed(3)
nlive = 500
ndead = nlive*20
indexes = np.random.randint(0, nlive, ndead)
ks_results = insertion_p_value(indexes, nlive)
assert 'D' in ks_results
assert 'p-value' in ks_results
assert 'sample_size' in ks_results
assert 'iterations' not in ks_results
assert 'nbatches' not in ks_results
assert 'p_value_uncorrected' not in ks_results
assert ks_results['p-value'] > 0.05
assert ks_results['sample_size'] == ndead
ks_results = insertion_p_value(indexes, nlive, 1)
assert 'D' in ks_results
assert 'p-value' in ks_results
assert 'sample_size' in ks_results
assert 'iterations' in ks_results
assert 'nbatches' in ks_results
assert 'uncorrected p-value' in ks_results
assert ks_results['p-value'] > 0.05
assert ks_results['uncorrected p-value'] < ks_results['p-value']
iterations = ks_results['iterations']
assert isinstance(iterations, tuple)
assert len(iterations) == 2
assert iterations[1] - iterations[0] == nlive
assert ks_results['nbatches'] == 20
def test_p_values_from_sample():
np.random.seed(3)
ns = NestedSamples(root='./tests/example_data/pc')
ns._compute_insertion_indexes()
nlive = len(ns.live_points())
ks_results = insertion_p_value(ns.insertion[nlive:-nlive], nlive)
assert ks_results['p-value'] > 0.05
ks_results = insertion_p_value(ns.insertion[nlive:-nlive], nlive, batch=1)
assert ks_results['p-value'] > 0.05
| 29.724138
| 78
| 0.630626
|
61690278a2f2153006aa3277c470a065632ba0ea
| 4,320
|
py
|
Python
|
tests/cupy_tests/statics_tests/test_meanvar.py
|
keisuke-umezawa/cupy
|
d4070ad0f5bd830638419e70e3f931e7b7d718f7
|
[
"BSD-3-Clause"
] | 1
|
2020-11-24T03:44:35.000Z
|
2020-11-24T03:44:35.000Z
|
tests/cupy_tests/statics_tests/test_meanvar.py
|
keisuke-umezawa/cupy
|
d4070ad0f5bd830638419e70e3f931e7b7d718f7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/cupy_tests/statics_tests/test_meanvar.py
|
keisuke-umezawa/cupy
|
d4070ad0f5bd830638419e70e3f931e7b7d718f7
|
[
"BSD-3-Clause"
] | 1
|
2020-11-24T03:44:35.000Z
|
2020-11-24T03:44:35.000Z
|
import unittest
from cupy import testing
@testing.gpu
class TestMeanVar(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def test_mean_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.mean()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def test_external_mean_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.mean(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def test_mean_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.mean(axis=1)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def test_external_mean_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.mean(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_var_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.var()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_var_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.var(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_var_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.var(ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_var_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.var(a, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_var_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.var(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_var_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.var(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_var_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.var(axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_var_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.var(a, axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_std_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.std()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_std_all(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.std(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_std_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return a.std(ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_std_all_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return xp.std(a, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_std_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.std(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_std_axis(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.std(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_std_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.std(axis=1, ddof=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_std_axis_ddof(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.std(a, axis=1, ddof=1)
| 33.230769
| 55
| 0.661343
|
8911bb3b9cba3d7a49d9ae2bcb14dcaf898ccbf4
| 503
|
py
|
Python
|
Code/test.py
|
yash3108/ASL_gest_detect
|
a6aaf6e2f422d764b096130c242c3e7091b78c3f
|
[
"MIT"
] | null | null | null |
Code/test.py
|
yash3108/ASL_gest_detect
|
a6aaf6e2f422d764b096130c242c3e7091b78c3f
|
[
"MIT"
] | null | null | null |
Code/test.py
|
yash3108/ASL_gest_detect
|
a6aaf6e2f422d764b096130c242c3e7091b78c3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 11:50:45 2020
@author: shahy_mxyzd8u
"""
import urllib.request
import cv2
import numpy as np
import time
# Snapshot URL of the IP Webcam app (kept for the urllib-based alternative below)
url = 'http://192.168.100.8:8080/shot.jpg'
# Open the MJPEG video stream once, before the loop
cam = cv2.VideoCapture('http://192.168.0.101:4747/mjpegfeed')
while True:
    # Alternative: fetch single JPEG snapshots from `url` via urllib
    # imgResp = urllib.request.urlopen(url)
    # imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    # img = cv2.imdecode(imgNp, -1)
    # cv2.imshow('IPWebcam', img)
    ret, frame = cam.read()
    if ret:
        cv2.imshow('IPWebcam', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
| 21.869565
| 62
| 0.656064
|
8cca8cbeed7e2897095d1584a978514ffd02c7a0
| 3,934
|
py
|
Python
|
mro/stages/vloupe/vloupe_preprocess/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | 1
|
2019-03-29T04:05:58.000Z
|
2019-03-29T04:05:58.000Z
|
mro/stages/vloupe/vloupe_preprocess/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | null | null | null |
mro/stages/vloupe/vloupe_preprocess/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import os.path
import martian
import subprocess
import tenkit.log_subprocess as tk_subproc
__MRO__ = """
stage VLOUPE_PREPROCESS(
in string pipestance_type,
in string sample_id,
in string sample_desc,
in bam concat_ref_bam,
in bam.bai concat_ref_bam_bai,
in fasta concat_ref_fasta,
in fasta.fai concat_ref_fasta_fai,
in json concat_ref_annotations_json,
in csv clonotypes_csv,
in bam consensus_bam,
in bam.bai consensus_bam_bai,
in json consensus_annotations_json,
in fasta consensus_fasta,
in fasta.fai consensus_fasta_fai,
in string contig_bam_relative_path,
in bam.bai contig_bam_bai,
in json contig_annotations_json,
in bed contig_annotations_bed,
in fasta contig_fasta,
in fasta.fai contig_fasta_fai,
in csv metrics_csv,
out vloupe output_for_vloupe,
src py "stages/vloupe/vloupe_preprocess",
)
"""
def split(args):
""" This exists only for the mem request """
return {
'join' : {'__mem_gb': 10}
}
def main(args, outs):
""" Dummy main """
return
def join(args, outs, chunk_defs, chunk_outs):
"""
Run the vlconverter executable with inputs that should be available in the outs
folder at the end of the pipeline run. This will generate "output_for_vloupe.vloupe"
in the stage folder.
Memory usage not expected to be excessive with this (thus no custom split/join
as of yet); it will need to load a few full files (bam.bai, fasta.fai) into memory.
"""
if args.concat_ref_bam is None or not os.path.isfile(args.concat_ref_bam) or \
args.consensus_bam is None or not os.path.isfile(args.consensus_bam) or \
args.contig_bam_bai is None or not os.path.isfile(args.contig_bam_bai):
martian.log_info('One or more bam files missing - cannot make vloupe file')
return
call = ["vlconverter",
args.sample_id,
args.pipestance_type,
"--output", outs.output_for_vloupe,
"--reference-bam", args.concat_ref_bam,
"--reference-bam-index", args.concat_ref_bam_bai,
"--reference-fasta", args.concat_ref_fasta,
"--reference-fasta-index", args.concat_ref_fasta_fai,
"--reference-annotations", args.concat_ref_annotations_json,
"--clonotypes", args.clonotypes_csv,
"--consensus-bam", args.consensus_bam,
"--consensus-bam-index", args.consensus_bam_bai,
"--consensus-annotations", args.consensus_annotations_json,
"--consensus-fasta", args.consensus_fasta,
"--consensus-fasta-index", args.consensus_fasta_fai,
"--contig-bam-relative-path", args.contig_bam_relative_path,
"--contig-bam-index", args.contig_bam_bai,
"--contig-annotations", args.contig_annotations_json,
"--contig-bed", args.contig_annotations_bed,
"--contig-fasta", args.contig_fasta,
"--contig-fasta-index", args.contig_fasta_fai,
"--description", args.sample_desc]
# the sample desc may be unicode, so send the whole
# set of args str utf-8 to check_output
unicode_call = [arg.encode('utf-8') for arg in call]
# but keep the arg 'call' here because log_info inherently
# attempts to encode the message... (TODO: should log_info
# figure out the encoding of the input string)
martian.log_info("Running vlconverter: %s" % " ".join(call))
try:
results = tk_subproc.check_output(unicode_call)
martian.log_info("vlconverter output: %s" % results)
    except subprocess.CalledProcessError as e:
outs.output_for_vloupe = None
martian.throw("Could not generate .vloupe file: \n%s" % e.output)
| 38.950495
| 89
| 0.657855
|
066b160ce80556966eaa6a68ac60db3f69984bb6
| 14,410
|
py
|
Python
|
ci/ci/ci.py
|
tpoterba/hail
|
8b14a519165f39d9ea7cf29545410622b6ae59c9
|
[
"MIT"
] | 1
|
2022-01-03T13:46:08.000Z
|
2022-01-03T13:46:08.000Z
|
ci/ci/ci.py
|
tpoterba/hail
|
8b14a519165f39d9ea7cf29545410622b6ae59c9
|
[
"MIT"
] | 2
|
2016-08-12T18:38:24.000Z
|
2018-09-05T15:26:35.000Z
|
ci/ci/ci.py
|
tpoterba/hail
|
8b14a519165f39d9ea7cf29545410622b6ae59c9
|
[
"MIT"
] | null | null | null |
import traceback
import json
import os
import logging
import asyncio
import concurrent.futures
import aiohttp
from aiohttp import web
import aiohttp_session
import uvloop
from gidgethub import aiohttp as gh_aiohttp, routing as gh_routing, sansio as gh_sansio
from hailtop.utils import collect_agen, humanize_timedelta_msecs
from hailtop.batch_client.aioclient import BatchClient
from hailtop.config import get_deploy_config
from gear import setup_aiohttp_session, \
rest_authenticated_developers_only, web_authenticated_developers_only, \
check_csrf_token, AccessLogger, create_database_pool
from web_common import setup_aiohttp_jinja2, setup_common_static_routes, render_template, \
set_message
from .constants import BUCKET
from .github import Repo, FQBranch, WatchedBranch, UnwatchedBranch
with open(os.environ.get('HAIL_CI_OAUTH_TOKEN', 'oauth-token/oauth-token'), 'r') as f:
oauth_token = f.read().strip()
log = logging.getLogger('ci')
uvloop.install()
deploy_config = get_deploy_config()
watched_branches = [
WatchedBranch(index, FQBranch.from_short_str(bss), deployable)
for (index, [bss, deployable]) in enumerate(json.loads(os.environ.get('HAIL_WATCHED_BRANCHES', '[]')))
]
routes = web.RouteTableDef()
@routes.get('')
@routes.get('/')
@web_authenticated_developers_only()
async def index(request, userdata): # pylint: disable=unused-argument
app = request.app
dbpool = app['dbpool']
wb_configs = []
for i, wb in enumerate(watched_branches):
if wb.prs:
pr_configs = []
for pr in wb.prs.values():
batch_id = pr.batch.id if pr.batch and hasattr(pr.batch, 'id') else None
build_state = pr.build_state if await pr.authorized(dbpool) else 'unauthorized'
if build_state is None and batch_id is not None:
build_state = 'building'
pr_config = {
'number': pr.number,
'title': pr.title,
# FIXME generate links to the merge log
'batch_id': pr.batch.id if pr.batch and hasattr(pr.batch, 'id') else None,
'build_state': build_state,
'review_state': pr.review_state,
'author': pr.author,
'out_of_date': pr.build_state in ['failure', 'success', None] and not pr.is_up_to_date(),
}
pr_configs.append(pr_config)
else:
pr_configs = None
# FIXME recent deploy history
wb_config = {
'index': i,
'branch': wb.branch.short_str(),
'sha': wb.sha,
# FIXME generate links to the merge log
'deploy_batch_id': wb.deploy_batch.id if wb.deploy_batch and hasattr(wb.deploy_batch, 'id') else None,
'deploy_state': wb.deploy_state,
'repo': wb.branch.repo.short_str(),
'prs': pr_configs,
}
wb_configs.append(wb_config)
page_context = {
'watched_branches': wb_configs
}
return await render_template('ci', request, userdata, 'index.html', page_context)
def wb_and_pr_from_request(request):
watched_branch_index = int(request.match_info['watched_branch_index'])
pr_number = int(request.match_info['pr_number'])
if watched_branch_index < 0 or watched_branch_index >= len(watched_branches):
raise web.HTTPNotFound()
wb = watched_branches[watched_branch_index]
if not wb.prs or pr_number not in wb.prs:
raise web.HTTPNotFound()
return wb, wb.prs[pr_number]
@routes.get('/watched_branches/{watched_branch_index}/pr/{pr_number}')
@web_authenticated_developers_only()
async def get_pr(request, userdata): # pylint: disable=unused-argument
wb, pr = wb_and_pr_from_request(request)
page_context = {}
page_context['repo'] = wb.branch.repo.short_str()
page_context['wb'] = wb
page_context['pr'] = pr
# FIXME
if pr.batch:
if hasattr(pr.batch, 'id'):
status = await pr.batch.status()
jobs = await collect_agen(pr.batch.jobs())
for j in jobs:
j['duration'] = humanize_timedelta_msecs(j['duration'])
page_context['batch'] = status
page_context['jobs'] = jobs
# [4:] strips off gs:/
page_context['artifacts'] = f'{BUCKET}/build/{pr.batch.attributes["token"]}'[4:]
else:
page_context['exception'] = '\n'.join(
traceback.format_exception(None, pr.batch.exception, pr.batch.exception.__traceback__))
batch_client = request.app['batch_client']
batches = batch_client.list_batches(
f'test=1 pr={pr.number}')
batches = sorted([b async for b in batches], key=lambda b: b.id, reverse=True)
page_context['history'] = [await b.status() for b in batches]
return await render_template('ci', request, userdata, 'pr.html', page_context)
async def retry_pr(wb, pr, request):
app = request.app
session = await aiohttp_session.get_session(request)
if pr.batch is None:
        log.info(f'retry cannot be requested for PR #{pr.number} because it has no batch')
set_message(
session,
f'Retry cannot be requested for PR #{pr.number} because it has no batch.',
'error')
return
batch_id = pr.batch.id
dbpool = app['dbpool']
async with dbpool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute('INSERT INTO invalidated_batches (batch_id) VALUES (%s);', batch_id)
await wb.notify_batch_changed(app)
log.info(f'retry requested for PR: {pr.number}')
set_message(session, f'Retry requested for PR #{pr.number}.', 'info')
@routes.post('/watched_branches/{watched_branch_index}/pr/{pr_number}/retry')
@check_csrf_token
@web_authenticated_developers_only(redirect=False)
async def post_retry_pr(request, userdata): # pylint: disable=unused-argument
wb, pr = wb_and_pr_from_request(request)
await asyncio.shield(retry_pr(wb, pr, request))
return web.HTTPFound(
deploy_config.external_url('ci', f'/watched_branches/{wb.index}/pr/{pr.number}'))
@routes.get('/batches')
@web_authenticated_developers_only()
async def get_batches(request, userdata):
batch_client = request.app['batch_client']
batches = [b async for b in batch_client.list_batches()]
statuses = [await b.status() for b in batches]
page_context = {
'batches': statuses
}
return await render_template('ci', request, userdata, 'batches.html', page_context)
@routes.get('/batches/{batch_id}')
@web_authenticated_developers_only()
async def get_batch(request, userdata):
batch_id = int(request.match_info['batch_id'])
batch_client = request.app['batch_client']
b = await batch_client.get_batch(batch_id)
status = await b.status()
jobs = await collect_agen(b.jobs())
for j in jobs:
j['duration'] = humanize_timedelta_msecs(j['duration'])
page_context = {
'batch': status,
'jobs': jobs
}
return await render_template('ci', request, userdata, 'batch.html', page_context)
@routes.get('/batches/{batch_id}/jobs/{job_id}')
@web_authenticated_developers_only()
async def get_job(request, userdata):
batch_id = int(request.match_info['batch_id'])
job_id = int(request.match_info['job_id'])
batch_client = request.app['batch_client']
job = await batch_client.get_job(batch_id, job_id)
page_context = {
'batch_id': batch_id,
'job_id': job_id,
'job_log': await job.log(),
'job_status': json.dumps(await job.status(), indent=2)
}
return await render_template('ci', request, userdata, 'job.html', page_context)
@routes.post('/authorize_source_sha')
@check_csrf_token
@web_authenticated_developers_only(redirect=False)
async def post_authorized_source_sha(request, userdata): # pylint: disable=unused-argument
app = request.app
dbpool = app['dbpool']
post = await request.post()
sha = post['sha'].strip()
async with dbpool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute('INSERT INTO authorized_shas (sha) VALUES (%s);', sha)
log.info(f'authorized sha: {sha}')
session = await aiohttp_session.get_session(request)
set_message(session, f'SHA {sha} authorized.', 'info')
return web.HTTPFound(
deploy_config.external_url('ci', '/'))
@routes.get('/healthcheck')
async def healthcheck(request): # pylint: disable=unused-argument
return web.Response(status=200)
gh_router = gh_routing.Router()
@gh_router.register('pull_request')
async def pull_request_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
target_branch = FQBranch.from_gh_json(gh_pr['base'])
for wb in watched_branches:
if (wb.prs and number in wb.prs) or (wb.branch == target_branch):
await wb.notify_github_changed(event.app)
@gh_router.register('push')
async def push_callback(event):
data = event.data
ref = data['ref']
if ref.startswith('refs/heads/'):
branch_name = ref[len('refs/heads/'):]
branch = FQBranch(Repo.from_gh_json(data['repository']), branch_name)
for wb in watched_branches:
if wb.branch == branch or any(pr.branch == branch for pr in wb.prs.values()):
await wb.notify_github_changed(event.app)
@gh_router.register('pull_request_review')
async def pull_request_review_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
for wb in watched_branches:
if number in wb.prs:
await wb.notify_github_changed(event.app)
async def github_callback_handler(request):
event = gh_sansio.Event.from_http(request.headers, await request.read())
event.app = request.app
await gh_router.dispatch(event)
@routes.post('/github_callback')
async def github_callback(request):
await asyncio.shield(github_callback_handler(request))
return web.Response(status=200)
async def batch_callback_handler(request):
app = request.app
params = await request.json()
log.info(f'batch callback {params}')
attrs = params.get('attributes')
if attrs:
target_branch = attrs.get('target_branch')
if target_branch:
for wb in watched_branches:
if wb.branch.short_str() == target_branch:
log.info(f'watched_branch {wb.branch.short_str()} notify batch changed')
await wb.notify_batch_changed(app)
@routes.get('/api/v1alpha/deploy_status')
@rest_authenticated_developers_only
async def deploy_status(request, userdata): # pylint: disable=unused-argument
batch_client = request.app['batch_client']
async def get_failure_information(batch):
jobs = await collect_agen(batch.jobs())
return [
{**j,
'log': await batch_client.get_job_log(j['batch_id'], j['job_id'])}
for j in jobs if j['state'] != 'Success']
wb_configs = [{
'branch': wb.branch.short_str(),
'sha': wb.sha,
'deploy_batch_id': wb.deploy_batch.id if wb.deploy_batch and hasattr(wb.deploy_batch, 'id') else None,
'deploy_state': wb.deploy_state,
'repo': wb.branch.repo.short_str(),
'failure_information': None if wb.deploy_state == 'success' else await get_failure_information(wb.deploy_batch)
} for wb in watched_branches]
return web.json_response(wb_configs)
@routes.post('/api/v1alpha/update')
@rest_authenticated_developers_only
async def post_update(request, userdata): # pylint: disable=unused-argument
log.info(f'developer triggered update')
async def update_all():
for wb in watched_branches:
await wb.update(request.app)
await asyncio.ensure_future(update_all())
return web.Response(status=200)
@routes.post('/api/v1alpha/dev_deploy_branch')
@rest_authenticated_developers_only
async def dev_deploy_branch(request, userdata):
app = request.app
params = await request.json()
branch = FQBranch.from_short_str(params['branch'])
steps = params['steps']
gh = app['github_client']
request_string = f'/repos/{branch.repo.owner}/{branch.repo.name}/git/refs/heads/{branch.name}'
branch_gh_json = await gh.getitem(request_string)
sha = branch_gh_json['object']['sha']
unwatched_branch = UnwatchedBranch(branch, sha, userdata)
batch_client = app['batch_client']
batch_id = await unwatched_branch.deploy(batch_client, steps)
return web.json_response({'sha': sha, 'batch_id': batch_id})
@routes.post('/api/v1alpha/batch_callback')
async def batch_callback(request):
await asyncio.shield(batch_callback_handler(request))
return web.Response(status=200)
async def update_loop(app):
while True:
try:
for wb in watched_branches:
log.info(f'updating {wb.branch.short_str()}')
await wb.update(app)
except concurrent.futures.CancelledError:
raise
except Exception: # pylint: disable=broad-except
log.exception(f'{wb.branch.short_str()} update failed due to exception')
await asyncio.sleep(300)
async def on_startup(app):
session = aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
app['client_session'] = session
app['github_client'] = gh_aiohttp.GitHubAPI(
aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=60)),
'ci',
oauth_token=oauth_token)
app['batch_client'] = await BatchClient('ci', session=session)
app['dbpool'] = await create_database_pool()
asyncio.ensure_future(update_loop(app))
async def on_cleanup(app):
session = app['client_session']
await session.close()
dbpool = app['dbpool']
dbpool.close()
await dbpool.wait_closed()
def run():
app = web.Application()
setup_aiohttp_jinja2(app, 'ci')
setup_aiohttp_session(app)
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
setup_common_static_routes(routes)
app.add_routes(routes)
web.run_app(deploy_config.prefix_application(app, 'ci'),
host='0.0.0.0',
port=5000,
access_log_class=AccessLogger)
| 35.060827
| 119
| 0.671339
|
1fa8039567ac74bc6b470a1317ab8fdb58346e09
| 489
|
py
|
Python
|
iosDevCourse/core/management/commands/filldb.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
iosDevCourse/core/management/commands/filldb.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
iosDevCourse/core/management/commands/filldb.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
from iosDevCourse.users.factories import generate_users
class Command(NoArgsCommand):
help = 'Fill the database with test fixtures'
def handle(self, *args, **options):
self.stdout.write('Starting fill db\r\n')
# fixture_list = []
# call_command('loaddata', *fixture_list)
generate_users()
self.stdout.write('Completed fill db\r\n')
| 25.736842
| 55
| 0.703476
|
19ad0943251b6d270c1eb010a5d7cd730e149888
| 476
|
py
|
Python
|
Chapter 8/bag.py
|
codered-by-ec-council/Micro-Degree-in-Python-Security
|
cb16ed78ee38dad32e3909371edec8ff3ce6e6a7
|
[
"MIT"
] | 4
|
2020-09-25T05:57:22.000Z
|
2021-02-27T14:56:23.000Z
|
Chapter 8/bag.py
|
codered-by-ec-council/Micro-Degree-in-Python-Security
|
cb16ed78ee38dad32e3909371edec8ff3ce6e6a7
|
[
"MIT"
] | 4
|
2021-06-08T23:01:11.000Z
|
2022-03-12T00:54:16.000Z
|
Chapter 8/bag.py
|
codered-by-ec-council/Micro-Degree-in-Python-Security
|
cb16ed78ee38dad32e3909371edec8ff3ce6e6a7
|
[
"MIT"
] | 5
|
2020-10-15T10:22:04.000Z
|
2021-11-16T22:17:50.000Z
|
from collections_extended import bag
b = bag("bananarama")
s = set("bananarama")
if __name__ == "__main__":
print(b.count("a"))
b.remove("a")
print(b.count("a"))
print("a" in b)
print(b.count("r"))
b.remove("r")
print(b.count("r"))
print("r" in b)
print("")
# print(s.count("a"))
s.remove("a")
# print(s.count("a"))
print("a" in s)
# print(s.count("r"))
s.remove("r")
# print(s.count("r"))
print("r" in s)
| 19.833333
| 36
| 0.521008
|
a6450618ca85997a065cd12179d9c2d7b4340cf7
| 39,445
|
py
|
Python
|
6 - Stitching.py
|
TAdeJong/LEEM-analysis
|
efd70a38b138c4bd0b2028cf199a6254366b286c
|
[
"MIT"
] | 8
|
2019-08-02T16:31:25.000Z
|
2022-03-22T19:34:44.000Z
|
6 - Stitching.py
|
TAdeJong/LEEM-analysis
|
efd70a38b138c4bd0b2028cf199a6254366b286c
|
[
"MIT"
] | 2
|
2021-08-04T14:49:44.000Z
|
2021-12-06T10:01:45.000Z
|
6 - Stitching.py
|
TAdeJong/LEEM-analysis
|
efd70a38b138c4bd0b2028cf199a6254366b286c
|
[
"MIT"
] | 5
|
2020-10-29T14:38:27.000Z
|
2022-02-17T16:13:05.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python [conda env:LEEM-analysis]
# language: python
# name: conda-env-LEEM-analysis-py
# ---
# # LEEM Image stitching
# ##### Author: Tobias A. de Jong (jongt@physics.leidenuniv.nl)
# This notebook facilitates image stitching of overlapping images. It is designed for use with ESCHER LEEM images.
# For those images, their positions are known approximately in terms of _stage coordinates_, i.e. the positions as reported by the sensors in the sample stage. It should however generalize to any set of overlapping images where relative positions of the images are known in some coordinate system which can approximately be transformed to coordinates in terms of pixels by an affine transformation (rotation, translation, mirroring).
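# In symbols: the pixel position $p_i$ of image $i$ is estimated from its stage position $s_i$ as $p_i \approx A\,(s_i - c)$, where $c$ is the mean stage coordinate and $A$ is a $2\times2$ transformation matrix; the code below absorbs a sign into this convention and computes $-A_{calc}\,(s_i - c)$.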
#
# The algorithm consists of the following steps:
#
# 1. Using the stage coordinates for each image, obtain a nearest neighbour graph with the nearest `n_neighbors` neighbouring images for each image.
# 2. Obtain an initial guess for the transformation matrix between stage coordinates and pixel coordinates, by one of the following options:
# 1. Copying a known transformation matrix from an earlier run of a comparable dataset.
# 2. Manually overlaying some nearest neighbor images from the center of the dataset, either refining the estimate, or making a new estimate for an unknown dataset
# 3. Calculate an initial estimate of the pixel coordinates of the images by applying the corresponding transformation to the stage coordinates
# 4. Apply a Gaussian filter with width `sigma` to the original dataset and apply a magnitude Sobel filter. Optionally scale down the images by an integer factor `z` in both directions so that `fftsize` can be reduced by the same factor without shrinking the compared sample area.
# 5. Iterate the following steps until the calculated image positions have converged to within `sigma`:
# 1. Obtain a nearest neighbour graph with per image the nearest `n_neighbors` neighbouring images from the current estimate of the pixel coordinates and calculate the difference vectors between each pair of nearest neighbours.
# 2. For each pair of neighboring images:
# 1. Calculate the cross-correlation between areas estimated to be in the center of the overlap of size `fftsize*fftsize` of the filtered data.
# If the estimated area falls outside the valid area of the image defined by `mask`/`radius`, take an area as close to the intended one as possible while still inside the valid area.
# 2. Find the location of the maximum in the cross-correlation. This corresponds to the correction to the estimate of the difference vector between the corresponding pair of image positions (see the sketch after this list).
# 3. Calculate the weight of the match by dividing the maximum of the cross-correlation by the square root of the product of the maxima of the two auto-correlations.
# 3. Compute a new estimate of the difference vectors by adding the found corrections. Reconvert to a new estimate of pixel coordinates by minimizing the squared error in the system of equations for the positions, weighted by modified weights, either:
# 1. $w_{mod} = w - w_{min}$ for $w > w_{min}$, $w_{mod} = 0$ otherwise, with $w_{min}$ the maximum lower bound such that the graph of nearest neighbours with non-zero weights is still connected
# 2. Only use the 'maximum spanning tree' of weights, i.e. minus the minimum spanning tree of minus the weights, such that only the $n$ best matches are used.
# 6. (Optional) Refine the estimate of the transformation matrix, using all estimated difference vectors with a weight better than $w_{min est}$ and restart from step 3.
# 7. Repeat steps 4 and 5 until `sigma` is satisfactorily small. Optionally repeat a final time with the original data if its signal to noise permits.
# 8. Select only the images for stitching where the average of the used weights (i.e. where $w > w_{min}$) is larger than $q_{thresh}$ for an appropriate value of $q_{thresh}$.
# 9. (Optional) For those images, match the intensities by calculating the intensity ratios between the overlap areas of size `fftsize*fftsize` and perform a global optimization.
# 10. Define a weighting mask, 1 in the center and sloping linearly to zero at the edges of the valid region, over a width of `bandwidth` pixels.
# 11. Per output block of `blocksize*blocksize`, select all images that overlap with that block, multiply each by the weighting mask and shift each image appropriately. Divide by an equivalently shifted stack of weighting masks. This way, information at the center of images gets prioritized and transitions get smoothed.
#
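# As a rough illustration of step 5B, the sketch below shows how the shift between two equally sized overlap patches and a match weight can be obtained from FFT-based correlations. This is a hypothetical helper, not the `find_correction_and_w` routine from `registration.stitching` used further down, whose exact normalization and sign conventions may differ.
# +
import numpy as np
def sketch_shift_and_weight(patch_a, patch_b):
    """Sketch of step 5B: shift and weight from circular cross-correlation."""
    Fa, Fb = np.fft.fft2(patch_a), np.fft.fft2(patch_b)
    # Circular cross-correlation via FFT; the peak position gives the relative
    # shift (which patch is taken as reference fixes the sign).
    xcorr = np.fft.ifft2(Fa * np.conj(Fb)).real
    peak = np.array(np.unravel_index(np.argmax(xcorr), xcorr.shape))
    size = np.array(xcorr.shape)
    shift = np.where(peak > size // 2, peak - size, peak)  # wrap to signed shifts
    # Weight: cross-correlation peak normalized by the auto-correlation peaks,
    # so two identical patches give a weight of 1.
    acorr_a = np.fft.ifft2(Fa * np.conj(Fa)).real.max()
    acorr_b = np.fft.ifft2(Fb * np.conj(Fb)).real.max()
    weight = xcorr.max() / np.sqrt(acorr_a * acorr_b)
    return shift, weight
# -
#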
# ## Considerations
#
# For square grids with a decent amount of overlap, it makes sense to set `n_neighbors` to 5 (including the image itself). However, for larger overlaps or datasets where an extra dimension is available (such as landing energy), it can be appropriate to increase the number of nearest neighbors to which each image is matched.
#
# Parameters and intermediate results of the iteration are saved in an `xarray` and saved to disk for reproducibility.
#
#
# ## Parallelization
#
# Using [`dask`](https://dask.org), the following steps are parallelized:
#
# * step 5B, where each pair of images can be treated independently. In practice, parallelization is performed over blocks of subsequent images with their nearest neighbours.
# This could be improved upon in two ways: firstly by treating each pair only once, and secondly by making a nicer selection of blocks of images located close to each other in the nnb graph. This would most likely require another (smarter) data structure than the nearest neighbour indexing matrix used now.
# * Step 6 is quite analogous to 5B and is parallelized similarly.
# * Step 11 is parallelized on a per-block basis. To optimize memory usage, results are streamed directly to a `zarr` array on disk (see the sketch after this list).
# * The minimizations are parallelized by scipy natively.
#
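# The sketch below illustrates the streaming pattern of step 11 with hypothetical shapes and a hypothetical file name: a lazily evaluated dask array is written chunk by chunk to a `zarr` store, so the full-size mosaic never has to fit in memory. In the actual step 11 further down, each chunk is assembled from the shifted, weighted images instead of being all zeros.
# +
import dask.array as da
import zarr
# Placeholder for the stitched result: 2x2 output blocks of bsize=1280 pixels.
lazy_mosaic = da.zeros((2 * 1280, 2 * 1280), chunks=(1280, 1280))
store = zarr.open('mosaic_sketch.zarr', mode='w', shape=lazy_mosaic.shape,
                  chunks=(1280, 1280), dtype='f8')
da.store(lazy_mosaic, store)  # each chunk is computed and written independently
# -
#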
# +
import time
import os
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.collections import PatchCollection
import datetime
import numpy as np
import scipy.ndimage as ndi
import numba
import dask
import dask.array as da
from dask.delayed import delayed
from dask.distributed import Client, LocalCluster
import xarray as xr
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from sklearn.neighbors import NearestNeighbors
import zarr
from registration.stitching import *
# -
# To use an existing cluster this way, first start a dask-scheduler with some workers at the address; see http://docs.dask.org/en/latest/setup/cli.html
# Otherwise `client = Client()` also works by default.
cluster = LocalCluster(n_workers=1, threads_per_worker=6)
client = Client(cluster)
client
# ## Initializing the required data
# From a container with both stage coordinates and images, and associated metadata:
#
# - Per image:
# - `data`: measured intensity
# - `multiplier`: multiplier for the image intensity due to detector gain scaling
# - `stagecoords`: x,y coordinate of the stage for this image
# - Globally:
# - `nmperpixel`: nanometer per pixel for the used settings of the microscope
# - `LNS`: identifier string of the used microscope settings.
#
# The data used to showcase here is not (yet) publicly available, but corresponds to the Dark Field data of twisted bilayer graphene in Fig. 2c of [Lisi, S., Lu, X., Benschop, T. et al. Observation of flat bands in twisted bilayer graphene. Nat. Phys. 17, 189–193 (2021).](https://doi.org/10.1038/s41567-020-01041-x).
#
# Please note: that figure in the paper corresponds to the results of an earlier, unreleased version, of this code.
PEEM = False # PEEM data does not fill the images completely, so a different mask is used.
folder = './data'
name = '20191105_212845_5.7um_349.0_sweep-STAGE_X-STAGE_Y_closed_loop_DF'
container = xr.open_dataset(os.path.join(folder, name+'.nc'), chunks={'index': 1})
container
stagecoords = container.stagecoords.data.compute()
data = container.data.data
index = container.index.data
nmperpixel = container.nmperpixel
multipliers = container.multiplier.data.compute()
# +
dims = data.shape
xx, yy = np.meshgrid(np.arange(-dims[2]//2, dims[2]//2), np.arange(-dims[1]//2, dims[1]//2))
if PEEM:
outer_radius = 350
else:
outer_radius = 640
mask = (xx)**2 + (yy)**2 < outer_radius**2
fig,axs= plt.subplots(ncols=2, figsize=[15,8])
axs[0].imshow(data[len(data)//2].T, vmax=(np.nanmean(data[len(data)//2])*2))
axs[1].imshow(np.where(mask, data[len(data)//2], np.nan).T)#, vmax=(data[len(data)//2].mean()*2))
# +
def plot_stack(images, n):
"""Plot the n-th image from a stack of n images.
For interactive use with ipython widgets.
"""
im = images[n, :, :].compute()
plt.figure(figsize=[12,12])
plt.imshow(im.T, cmap='inferno')
plt.show()
interactive(lambda n: plot_stack(data, n),
n=widgets.IntSlider(1, 0, data.shape[0]-1, 1, continuous_update=False)
)
# -
# ## Optional: discard dark images
#
# Images that are too dark (e.g. Si substrate next to vdW flakes) will only take up computation time and distort results, so we crop them out in the next two cells.
# +
base_extent = np.array([-dims[1]//2,dims[1]//2,-dims[2]//2, dims[2]//2])
Is = np.nanmax(np.where(mask, data, np.nan), axis=(1,2)) / multipliers
Is = Is.compute()
def I_mask_plot(ratio=3.5):
fig, ax = plt.subplots(figsize=[4*2,8])
I_thresh = Is.max() / ratio
scat_sel = ax.scatter(*stagecoords[:,Is >= I_thresh],
c=Is[Is>=I_thresh],
vmin=0, vmax=Is.max())
scat_not_sel = ax.scatter(*stagecoords[:,Is<I_thresh],
c=Is[Is<I_thresh],
vmin=0, vmax=Is.max(), marker='x')
I_mask = (Is >= I_thresh)
plt.colorbar(scat_sel, ax=ax)
ax.set_aspect('equal')
return I_mask
widget = interactive(I_mask_plot, ratio=(1.1, Is.max()/Is.min()+0.1, 0.1))
display(widget)
# +
I_mask = widget.result
data = data[I_mask]
stagecoords = stagecoords[:,I_mask]
multipliers = multipliers[I_mask]
# -
# ## More initialization
# Initialize the output xarray containing all the metadata of the progress.
center = np.mean(stagecoords, axis=1)
fftsize = 512
n_neighbors = 1 + 4 # sklearn nearest neighbors includes the point itself
output = xr.Dataset(
{"multiplier": (['index'], multipliers),
"fftsize": (['iteration'], [fftsize]),
},
coords = {
"index": index[I_mask],
"direction": ["x", "y"],
"iteration": [0],
"neighbor_no": np.arange(n_neighbors),
}
)
output.attrs["scriptversion"] = 'v10'
output.attrs["center"] = center
output
# ## 1. Finding the initial nearest neighbors
# For a simple square lattice with minimal overlap, the number of nearest neighbors is $4+1$, i.e. the image itself and its horizontal and vertical neighbors. For larger overlaps or overlays, a larger number of neighbors may be specified.
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree').fit(stagecoords.T)
nn = nbrs.kneighbors(stagecoords.T, return_distance=False)
output["nnbs"] = (("iteration", "indices", "neighbor_no"), [nn])
# +
# cropping to a small amount of images to calculate initial transformation matrix
if PEEM:
r = 0.05
else:
r = 0.003
cropindices, = np.where((np.abs(stagecoords[0]-center[0]) < r)
& (np.abs(stagecoords[1]-center[1]) < r))
cropnbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree').fit(stagecoords[:,cropindices].T)
cropstagecoords = stagecoords[:,cropindices]
cnb = cropnbrs.kneighbors(cropstagecoords.T, return_distance=False).squeeze()
fig, axs = plt.subplots(ncols=3, figsize=(15,6.5))
axs[0].scatter(*cropstagecoords, c=cropindices, cmap='nipy_spectral')
axs[0].set_xlim(center[0]-r*1.1, center[0]+r*1.1)
axs[0].set_ylim(center[1]-r*1.1, center[1]+r*1.1)
for i in range(len(cropindices)):
axs[0].annotate(str(cropindices[i]), cropstagecoords[:,i])
axs[0].set_title('Stage positions of cropped positions')
axs[1].plot(nn[:,1:]-nn[:,0][...,np.newaxis],'.', alpha=0.5)
axs[1].set_title('Index difference of nearest neighbors')
for x in nn:
axs[2].scatter(stagecoords[0][x[0]]- stagecoords[0][x[1:]], stagecoords[1][x[0]]- stagecoords[1][x[1:]])
axs[2].set_xlim(-0.005, 0.005)
axs[2].set_ylim(-0.003, 0.003)
axs[2].set_title('Stage coordinate differences\n between nearest neighbors.')
# -
# ## 2A: Predetermined approximate transformation matrices
LNS = container.attrs["LNS"]
if '5.7 um' in LNS:
FoV = "5.7 um"
# 5.7um 18eV
#A_calc = np.array([[ 2643.39574684, 256157.63546798],
# [-211200.54223502, 9852.21674877]])
# 5.7um 18eV, re-estimate based on best matches
#A_calc = np.array([[ 4082.15139025, 248042.15443774],
# [-219271.06227564, 12336.31827313]])
# 5.7um 8eV, re-estimate based on best matches
#array([[ 9086.96378958, 244718.34754219],
# [-211057.05649402, 13839.03554334]])
A_calc = np.array([[ 10561.34, 250889. ],
[-266954.5, 31157.17]] )
elif '2.3 um' in LNS:
FoV = "2.3 um"
#2.3um, initial estimate
A_calc = np.array([[ -14446.74981648, -739030.02309468],
[ 793733.26137892, -115473.44110854]])
elif '0.66 um' in LNS:
FoV = "0.66 um"
#0.66um, initial estimate
A_calc = np.array([[-250395.70614198, -909469.57525559],
[1041569.97155636, -247301.46262504]])
else:
FoV = "unknown"
print("WARNING: Unknown Field of View.")
output.attrs["FoV"] = FoV
FoV
# ## Step 2B: Manual overlap images
# Manually overlap images near the center to get an estimate.
#
# Can be skipped if a sufficient estimate is available in 2A.
#
# TODO: estimate shift with the estimate from 2A if it exists.
# +
#centerindex = np.argmin(np.linalg.norm(cropstagecoords.T-center, axis=1)) #Index of image closest to the center
centerindex = np.argmin(np.abs(cropindices - 27)) # Index closest to 27
data[cropindices[centerindex]].persist()
matchdata = data[cropindices[cnb[centerindex]]]
vmax = np.nanmax(matchdata)
print(cropindices[centerindex])
z = 10 # zoom factor to scale down the images
def plotoverlayshift(dx, dy, matchindex=2):
fig, axs = plt.subplots(ncols=1, figsize=[15,15])
match = cnb[centerindex][matchindex]
#axs.imshow(np.where(mask, data[cropindices][centerindex], 0).T, vmax=vmax, origin='lower')
axs.imshow(np.where(mask, matchdata[0], 0).T, vmax=vmax, origin='lower')
#s = ndi.shift(np.where(mask, data[cropindices][match], 0), (z*dx,z*dy))
s = ndi.shift(np.nan_to_num(matchdata[matchindex]), (z*dx,z*dy))
axs.annotate(str(cropindices[match]), (640+z*dx, 512+z*dy))
axs.imshow(s.T, cmap='inferno', vmax=vmax, zorder=2, origin='lower', alpha=0.6,)
return np.array([z*dx, z*dy]), match
print(cropindices[cnb[centerindex]])
widget = interactive(plotoverlayshift,
dx=widgets.IntSlider(value=0,
min=-dims[1]//z, max=dims[1]//z,
continuous_update=False),
dy=widgets.IntSlider(value=-43,
min=-dims[2]//z, max=dims[2]//z,
continuous_update=False),
matchindex=4
)
display(widget)
widget2 = interactive(plotoverlayshift,
dx=widgets.IntSlider(value=-52,
min=-dims[1]//z, max=dims[1]//z,
continuous_update=False),
dy=widgets.IntSlider(value=-2,
min=-dims[2]//z, max=dims[2]//z,
continuous_update=False),
matchindex=3
)
display(widget2)
# -
# Calculate the transformation. `S` are stage coordinates minus center coord,
# `P` are pixel coordinates.
# A_calc is then the desired transformation.
S, P = np.empty((2,2)), np.empty((2,2))
matchindex = [0,0]
P[:,0], matchindex[0] = widget.result
P[:,1], matchindex[1] = widget2.result
S = cropstagecoords[:, matchindex] - cropstagecoords[:, [centerindex]]
print(f'P = {P},\n S = {S},\n S^-1 = {np.linalg.inv(S)}')
A_calc = P @ np.linalg.inv(S)
A_calc, np.linalg.svd(A_calc)[1]*nmperpixel/1e6
# **Continue from here if you skipped 2B.**
output["A_calc"] = (['iteration', 'direction', 'direction2'], [A_calc]) # todo: make nicer direction coordinates
output
S,V,D = np.linalg.svd(A_calc)
print("nm per pixel according to container:", nmperpixel)
print("found x and y correction factor compared to container:", V*nmperpixel/1e6)
cpc = A_calc@(cropstagecoords- cropstagecoords.mean(axis=1, keepdims=True))
# ## Check the estimate for the transformation
#
# We plot some images from the center with their approximate positions to check if everything makes sense.
# +
def vertices_from_mask(mask, coarsen):
lmask = np.pad(mask,1)
fig = plt.figure()
CS = plt.contour(lmask, levels=[0.5])
pathcoords = CS.allsegs[0][0][::coarsen,:] - 1
plt.close(fig)
return pathcoords
def pathpatch_from_mask(mask, offset, coarsen=50, **kwargs):
pathcoords = vertices_from_mask(mask, coarsen) + np.array(offset)
pathpatch = mpl.patches.PathPatch(mpl.path.Path(pathcoords), **kwargs)
return pathpatch
color = plt.cm.nipy_spectral(np.linspace(0, 1, cpc.shape[1]))
pathpatches = [pathpatch_from_mask(mask.T, coord, coarsen=50, facecolor='None', edgecolor=color[i]) for i,coord in enumerate(cpc.T)]
fig, ax = plt.subplots()
for pathpatch in pathpatches:
ax.add_patch(pathpatch)
ax.autoscale_view()
ax.set_aspect('equal')
plt.show()
# +
fig,ax = plt.subplots(figsize=[10,10], constrained_layout=True)
base_extent = np.array([-dims[1]//2,dims[1]//2,-dims[2]//2, dims[2]//2])
ax.scatter(*cpc, c=cropindices, cmap='nipy_spectral',
zorder=5, linewidths=1, edgecolors='black')
cfac = 4
coarse_mask = da.coarsen(np.all, da.asarray(mask), {0:cfac,1:cfac})
cropdata = da.coarsen(np.mean, data[cropindices], {1:cfac,2:cfac}).persist()
xlim, ylim = np.array([ax.get_xlim(), ax.get_ylim()])
vmin, vmax = da.nanmin(cropdata).compute(), da.nanmax(cropdata).compute()
for i in range(len(cropdata)):
plt.imshow(np.where(coarse_mask, cropdata[i], np.nan).T,
extent=base_extent + np.array([cpc[0,i],cpc[0,i], cpc[1,i],cpc[1,i]]),
origin='lower',
#alpha=0.5,
cmap='gray',
vmin=vmin,vmax=vmax,
)
plt.annotate(str(cropindices[i]), +cpc[:,i], bbox=dict(facecolor='white', alpha=0.4, edgecolor='none'))
plt.colorbar()
ax.set_xlim(xlim + base_extent[:2])
ax.set_ylim(ylim + base_extent[2:]);
# -
# ## Step 3: Calculate approximate pixel coordinates from linear transformation
# Might or might not be a good idea to use.
data = data/data.mean(axis=0).compute()
sc = -A_calc @ (stagecoords - center[:,np.newaxis]) #stage-based coordinates in pixels
pc = sc.copy()
# +
color = plt.cm.nipy_spectral(np.linspace(0, 1, pc.shape[1]))
pathpatches = [pathpatch_from_mask(mask.T, coord-np.array(dims[1:])//2, coarsen=50, facecolor=color[i],
alpha=0.2)
for i,coord in enumerate(pc.T)]
fig, ax = plt.subplots()
for pathpatch in pathpatches:
ax.add_patch(pathpatch)
ax.autoscale_view()
ax.set_aspect('equal')
for i in range(len(pc.T)-1):
ax.arrow(*pc[:,i], *(pc[:,i+1]-pc[:,i])*0.9, head_width=100,
head_length=150, fc='k', length_includes_head=True, overhang=0.4)#, ec='k')
axin = inset_axes(ax, width="25%", height="20%", loc=7)
axin.tick_params(labelleft=False, labelbottom=False, direction='in', length=0)
axin.set_xlim(-1200,1200)
axin.set_ylim(-1100,1100)
axin.set_aspect('equal')
ipathpatches = [pathpatch_from_mask(mask.T, coord-pc.T[70]-np.array(dims[1:])//2,
coarsen=50, facecolor='none',
edgecolor=color[nn[70][i]])
for i,coord in enumerate(pc.T[nn[70]])]
for pathpatch in ipathpatches:
axin.add_patch(pathpatch)
for i,e in enumerate(e_clip):
rect = mpl.patches.Rectangle(e//2-fftsize//2,
fftsize, fftsize,
alpha=0.5,
facecolor=color[nn[70][i+1]], edgecolor='k')
axin.add_patch(rect)
plt.show()
# +
fig,ax = plt.subplots(figsize=[6,6])
base_extent = np.array([-dims[1]//2,dims[1]//2,-dims[2]//2, dims[2]//2])
ax.scatter(*pc, c=np.arange(pc.shape[1]), cmap='nipy_spectral', zorder=5)
for i,im in enumerate(data):
plt.annotate(str(i), pc[:,i])
ax.set_aspect('equal')
# -
# ## Step 4: apply digital filters
# Start with a large sigma, which you can decrease after successful iteration of step 5.
output['pc'] = (('iteration', 'direction', 'index'), pc[None,...])
output['sigma'] = (['iteration'], [0])
output['z'] = (['iteration'], [0])
output['weights'] = (['iteration', 'index', 'neighbor_no'], np.zeros((1,len(output.index), n_neighbors)))
# ## Step 5: Calculating cross-correlations and optimizing positions
# +
sigma = 8 # usually 24 is a good value to start, squeezing down to ~4 in the end
z = 4 # Scale down factor of the images.
# masked_data = np.where(mask, data, data.mean(axis=(0,1)))
if sigma != 0:
sobel = da.nan_to_num(only_filter(data, sigma=sigma))
else:
sobel = data
if z != 1:
sobel = da.coarsen(np.mean, sobel, axes={1:z,2:z})
sobel.persist()
dims = sobel.shape
base_extent = np.array([-dims[1]//2,dims[1]//2,-dims[2]//2, dims[2]//2]) # for plotting
xx, yy = np.meshgrid(np.arange(-fftsize//2, fftsize//2), np.arange(-fftsize//2, fftsize//2))
corr_mask = (np.abs(xx) < fftsize//2) & (np.abs(yy) < fftsize//2)
# -
interactive(lambda n: plot_stack(sobel, n),
n=widgets.IntSlider(1, 0, data.shape[0]-1, 1, continuous_update=False)
)
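# The iteration below repositions the images by minimizing a weighted squared error on the pairwise difference vectors via `error_func` from `registration.stitching`. That function is not shown in this notebook; judging purely from how it is called here, it plausibly computes something like the sketch below (the actual implementation and its sign conventions may differ).
# +
import numpy as np
def error_func_sketch(x, nnbs, weights, dr):
    """Hypothetical stand-in with the same call signature as error_func(x, nnbs, weights, dr)."""
    # x: one coordinate per image; nnbs: (n_images, n_nbs) neighbour indices;
    # dr: target coordinate differences between each image and its neighbours.
    diffs = x[:, np.newaxis] - x[nnbs]
    return np.sum(weights * (diffs - dr) ** 2)
# -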
# +
res = {'x': None, 'y': None}
max_iter = 2
mst_iters = []
PLOT_ITERS = True
# Start iteration
for iteration in np.arange(max_iter):
new_output = output.isel(iteration=[-1])
new_output = new_output.assign_coords(iteration=new_output.iteration+1)
new_output.sigma.data = [sigma]
new_output.z.data = [z]
t = time.time()
diffvecs, nn = to_nn_diffvecs(pc / z, n_neighbors=n_neighbors)
diffvecs = da.from_array(diffvecs, chunks=(6,-1,2))
indexed = da.stack([sobel[nn[:, index]] for index in range(0, nn.shape[1])],
axis=1).rechunk({0:6, 1:-1})
corrections, w = find_correction_and_w(indexed[:,[0],...],
indexed[:,1:,...],
diffvecs,
fftsize // z)
# Suspicious minus sign
dr_new = corrections - diffvecs
dr_new, w_calc = da.compute(dr_new, w)
print("Time: {:.1f}s".format(time.time() - t))
print("Minimum number of connected components",
connected_bisect(0, w_calc, nn)+1.5)
w_min = bisect(connected_bisect, 0, 1, args=(w_calc, nn)) - 0.001
print("w_min: {:.3f}, w_mean: {:.3f}".format(w_min, w_calc.mean()))
mst = find_maximum_spanning_tree(w_calc, nn)
mst_w, mst_nn = weights_and_neighbours(mst)
print("mst_min: {:.3f}, mst_mean: {:.3f}".format(mst_w[mst_w != 0].min(), mst_w[mst_w != 0].mean()))
if PLOT_ITERS:
plt.figure(figsize=[6,4])
plt.hist(w_calc.flatten(), range=[0,1], bins=30, label='all')
plt.hist(mst_w[mst_w != 0].flatten(), range=[0,1], bins=30, label='mst')
plt.axvline(w_min, c='black', alpha=0.5)
plt.xlabel('weight')
plt.ylabel('Number of nnbs')
plt.legend()
plt.show()
w_alt = np.where(w_calc > w_min-0.001,
(w_calc - w_calc.min()+0.001) / (1-w_calc.min()),
0)
if iteration in mst_iters:
print("Use only the maximum weight spanning tree")
drx = transform_to_mst(dr_new[:,:,0], mst, nn)
dry = transform_to_mst(dr_new[:,:,1], mst, nn)
res['x'] = minimize(error_func,
pc[0] / z,
args=(mst_nn, mst_w**2, drx))
res['y'] = minimize(error_func,
pc[1] / z,
args=(mst_nn, mst_w**2, dry))
color = np.nanmin(np.where(mst_w != 0, mst_w, np.nan), axis=1)
cmap = 'viridis'
else:
print("Use all weights larger then w_min, such that the graph is still fully connected...")
res['x'] = minimize(error_func,
pc[0] / z,
args=(nn[:,1:], w_alt**2, dr_new[:,:,0]))
res['y'] = minimize(error_func,
pc[1] / z,
args=(nn[:,1:], w_alt**2, dr_new[:,:,1]))
color = w_calc.mean(axis=1)
cmap = 'inferno'
for coord, r in res.items():
if not r.success:
print(coord, r.message)
pc_new = np.stack([res['x'].x, res['y'].x]) * z
pc_new = pc_new - pc_new.mean(axis=1, keepdims=True)
pc_new_diff = np.abs(pc_new - pc)
pc = pc_new
print("Time: {:.1f}s".format(time.time() - t))
#Plot and set pc as the results of the optimization to start a new round
if PLOT_ITERS:
fig, ax = plt.subplots(ncols=2, figsize=[14,6])
ax[0].scatter(*pc, c=color, cmap=cmap,
zorder=5, linewidths=1, edgecolors='black', vmin=0, vmax=1)
ax[0].scatter(*sc, c=w_calc.min(axis=1), cmap=cmap,
zorder=5, marker='x', vmin=0, vmax=1, label='')
for i,im in enumerate(data):
ax[0].annotate(str(i), pc[:,i])
ax[0].set_aspect('equal')
ax[0].set_xlabel('x (pixels)')
ax[0].set_ylabel('y (pixels)')
qhist(np.linalg.norm(pc_new_diff, axis=0), w_calc.mean(axis=1), ax=ax[1], cmap='inferno', binbins=40)
ax[1].axvline(pc_new_diff.mean(), c='black', alpha=0.5)
ax[1].set_xlabel('pixels shifted compared to previous iteration')
ax[1].set_ylabel('Number of images')
plt.show()
new_output["pc"].data = pc[None,...]
new_output["nnbs"].data = nn[None,...]
new_output["weights"].data[..., 1:] = w_calc
output = xr.concat([output,new_output], dim='iteration')
if pc_new_diff.max() < 0.2*sigma:
print("Converged, breaking out of loop, consider decreasing sigma and or z")
break
else:
print(f"max shift: {pc_new_diff.max():.2f} pix, mean shift: {pc_new_diff.mean():.2f} pix")
print('done')
# End iteration
# -
# Render the positions in the last 10 iterations
ldat = output.sel(iteration=0)
plt.scatter(ldat.pc.sel(direction='x'), ldat.pc.sel(direction='y'), c=ldat.weights.isel(neighbor_no=slice(1,None)).mean(dim='neighbor_no'), s=1)
for i in output.iteration[-10:]:
ldat = output.sel(iteration=i)
plt.scatter(ldat.pc.sel(direction='x'), ldat.pc.sel(direction='y'), c=ldat.weights.isel(neighbor_no=slice(1,None)).mean(dim='neighbor_no'), s=5)
plt.scatter(ldat.pc.sel(direction='x'), ldat.pc.sel(direction='y'), c=ldat.weights.isel(neighbor_no=slice(1,None)).mean(dim='neighbor_no'), s=35, marker='x')
# ## (Optional) Step 6: Refine transformation based on best matches
#
# Pick an appropriate threshold `w_lim` for the matches to use (usually about 0.9 is good). Compare the new linear transformation `A_prime` with the `A_calc` used before. If they differ significantly, run the cell below to make `A_prime` the new transformation, and continue back at step 3.
# +
w_lim = 0.7
pcpercont = pc.reshape((pc.shape[0], 1, -1))
pixelcoords = (pcpercont - pcpercont.mean(axis=-1, keepdims=True)).reshape(pc.shape)
cstagecoords = -(stagecoords-center[:,np.newaxis])
red_dr = to_nn_diffvecs(pixelcoords, nn=nn)[0][w_calc > w_lim,:]
red_sc = to_nn_diffvecs(cstagecoords, nn=nn)[0][w_calc > w_lim,:]
res_transform = minimize(base_transformation_error,
A_calc,
args=(red_sc.T, red_dr.T, w_calc[w_calc > w_lim]))
A_prime = res_transform.x.reshape((2,2))
print(res_transform['message'])
fig, ax = plt.subplots(ncols=2, figsize=[12,6])
vmax = np.linalg.norm((A_calc@red_sc.T - red_dr.T), axis=0).max()
ax[0].scatter(*red_dr.T)
ax[0].scatter(*(A_calc @ red_sc.T), c=w_calc[w_calc > w_lim],
cmap='magma', alpha=0.5)
ax[0].quiver(*red_dr.T, *(A_calc @ red_sc.T - red_dr.T), w_calc[w_calc > w_lim],
angles='xy', scale_units='xy', scale=1, alpha=0.3, cmap='magma')
ax[0].set_title('A_calc')
ax[1].scatter(*red_dr.T)
ax[1].scatter(*(A_prime @ red_sc.T),
c=np.linalg.norm((A_prime@red_sc.T - red_dr.T), axis=0),
vmax=vmax, vmin=0, cmap='inferno_r', alpha=0.5)
ax[1].quiver(*red_dr.T, *(A_prime @ red_sc.T - red_dr.T), np.linalg.norm((A_prime@red_sc.T - red_dr.T), axis=0),
angles='xy', scale_units='xy', scale=1, alpha=0.3, cmap='inferno_r', clim=(0,vmax))
ax[1].set_title('A_prime')
A_prime, A_calc, np.linalg.svd(A_prime), np.linalg.svd(A_calc)
print(np.abs(red_dr.reshape((-1,2)).T - (A_calc @ red_sc.reshape((-1,2)).T)).mean(axis=1) )
with np.printoptions(precision=2):
print(A_prime, A_calc, A_prime/A_calc, sep='\n')
# -
# If in the plot above the colored dots (estimate from linear transformation) are a lot closer to their blue counterparts (current position after iteration) under `A_prime` than under `A_calc`, we might want to restart the iteration using the newly estimated linear transformation. To do so, run the cell below and restart step 5.
A_calc = A_prime.copy()
plt.scatter(*pc, c=np.arange(sc.shape[1]),
cmap='nipy_spectral', zorder=5, linewidths=1, edgecolors='black')
plt.scatter(*(-A_prime@(stagecoords-center[:,np.newaxis])), c=np.arange(sc.shape[1]),
cmap='nipy_spectral', zorder=5, marker='x')
sc = -A_calc @ (stagecoords - center[:,np.newaxis])
pc = sc.copy()
print("Now restart step 5")
# ### Defining the weighting mask
# +
def gen_weight_mask(datashape, radius=610, edgewidth=200):
xx, yy = np.meshgrid(np.arange(-datashape[2]//2, datashape[2]//2),
np.arange(-datashape[1]//2, datashape[1]//2))
if edgewidth == 0:
edgewidth = 1
mask_shift = np.array([0, 0])
circ = ((xx-mask_shift[0])**2 + (yy-mask_shift[1])**2 < outer_radius**2).astype(int)
# Add a pixel edge of zero, such that we can abuse ndi.distance_transform to get nice sloping edges
# Afterwards, remove these edges again
circ = np.pad(circ, 1)
weight_mask = ndi.distance_transform_edt(circ)
return np.where(weight_mask > edgewidth, 1, weight_mask / edgewidth)[1:-1, 1:-1]
dims = data.shape
if not PEEM:
# Set outer radius of the detector
outer_radius = 610
#outer_radius = 570
bandwidth = 200 # width of the linear slope towards 0
output.attrs["weight_mask"] = (outer_radius, bandwidth)
weight_mask = gen_weight_mask(dims, *output.attrs["weight_mask"])
#weight_mask = np.pad(gen_weight_mask(np.array([1,1280,1024]), *output.attrs["weight_mask"]), ((0,15),(0,37)))
plt.imshow(weight_mask.T)
# -
# ## 8. Select which images to use based on weights from current matches
# +
def get_quality_mask(q=0.7):
m = np.average(w_calc, axis=1, weights=w_calc > w_min) > q
return m
qs = np.arange(0, 1, 0.005)
plt.plot(qs, [get_quality_mask(q).sum() for q in qs], '.', label=name[50:])
plt.minorticks_on()
plt.grid(True, which='both')
plt.margins(x=0)
plt.ylim(0, None)
plt.title('Number of images with high enough quality matches as function of $q$')
plt.xlabel('q')
plt.ylabel('Number of images')
# +
def select_q_thresh(q_thresh):
    msk = get_quality_mask(q_thresh)
c = np.average(w_calc, axis=1, weights=w_calc>=w_min)
plt.figure(clear=True)
plt.scatter(*np.where(msk, pc, np.nan), c=c,
cmap='inferno', zorder=5, vmax=1, vmin=w_min)
plt.scatter(*np.where(msk, np.nan, pc), c=c,
cmap='inferno', zorder=5, vmax=1, vmin=w_min,
marker='x')
plt.colorbar()
return q_thresh
widget = interactive(select_q_thresh,
q_thresh=widgets.FloatSlider(value=0.7,
min=0, max=1,
step=0.01, continuous_update=False))
display(widget)
# # +
q_thresh = widget.result
# -
# ## (Optional) Step 9: Matching the image intensities.
# +
rel_intens = np.ones_like(multipliers)
fig, ax = plt.subplots(ncols=1, figsize=[5,5])
qmask = get_quality_mask(q=q_thresh)
diffvecs, nn = to_nn_diffvecs(pc[:, qmask], n_neighbors=n_neighbors)
diffvecs = da.from_array(diffvecs, chunks=(1,-1,2))
#TODO: update to use @gufunc
indexed = da.stack([data[qmask][nn[:, index]] for index in range(0, nn.shape[1])],
axis=1).rechunk({0: 1, 1: -1})
regions = da.map_blocks(find_overlap_regions,
indexed,
diffvecs.rechunk({0:1})[..., np.newaxis],
mask=weight_mask,
dtype=np.float64, chunks=(1,4,2,2, fftsize, fftsize),
new_axis=(-1,-2))
region_intensities = regions[:,:,:,0]
region_weights = regions[:,:,:,1]
region_means = da.nanmean(region_intensities * region_weights, axis=(-1,-2)) / da.nanmean(region_weights, axis=(-1,-2))
region_means = region_means.compute()
region_ratios = region_means[..., 0] / region_means[..., 1]
Iopt_weight = np.where(w_calc[qmask, :4] > w_min - 0.001,
w_calc[qmask, :4],
0)**4
res_I = minimize(error_func,
np.zeros((qmask).sum()),
args=(nn[:,1:],
Iopt_weight,
np.log(region_ratios))
)
rel_intens[qmask] = np.exp(res_I.x)
rel = np.exp(res_I.x)
im = ax.scatter(*pc[:, qmask],
c=rel, zorder=5)
ax.set_aspect('equal')
plt.colorbar(im, ax=ax)
print(res_I.message)
# -
# ## Alternative step 9: use multiplier from container
# This method prevents intensity variation within the beam from propagating across the full span of the image.
rel_intens = multipliers
# Add metadata and write the output to disk
output.attrs["q_thresh"] = q_thresh
output["multiplier"] = ("index", rel_intens)
output.attrs["timestamp"] = datetime.datetime.now().strftime("%Y-%m-%d_%H%M")
output.to_netcdf(os.path.join(folder,name+'stitchdata-'+output.attrs["timestamp"]+'.nc'))
# ## Step 10: Merging images
#
# First we calculate the edges of the blocks and which images fall in which block.
# +
bsize = 1280
xedges = np.arange(np.floor(pc[0].min()), pc[0].max() + data.shape[1], bsize)[::-1]
xmask = np.abs((xedges[:, np.newaxis] - bsize//2) - pc[0]) < dims[1] + bsize//2
yedges = np.arange(np.floor(pc[1].min()), pc[1].max() + data.shape[2], bsize)[::-1]
ymask = np.abs((yedges[:, np.newaxis] - bsize//2) - pc[1]) < dims[2] + bsize//2
plt.scatter(*pc, c=np.arange(pc.shape[1]), zorder=5)
for x in xedges:
plt.axvline(x)
for y in yedges:
plt.axhline(y)
plt.gca().set_aspect('equal')
# +
@da.as_gufunc(signature="(i,j),(2),()->(i,j)", output_dtypes=float, vectorize=True)
def shift_images(image, shift, order=1):
"""Shift `image` by `shift` pixels."""
return ndi.shift(image, shift=shift, order=order)
fig, ax = plt.subplots(ncols=1,
figsize=[4, 4],
sharex=True, sharey=True)
ims = []
cs = 10
quality_mask = get_quality_mask(q_thresh)
print("images of enough quality: {} out of {}".format(np.sum(quality_mask),
np.sum(I_mask)))
ax.scatter(*pc, c=quality_mask, zorder=5, vmax=1, vmin=0)
total_mask = np.logical_and(xmask[:, None, :], ymask[None, ...])
total_mask = np.logical_and(total_mask,
quality_mask)
normalised_data = np.nan_to_num(data / rel_intens[:, np.newaxis, np.newaxis]) * weight_mask
e_mask = da.from_array(np.pad(weight_mask,
pad_width=((0, bsize-weight_mask.shape[0]),
(0, bsize-weight_mask.shape[1]))),
chunks=(-1,-1))
im_list = []
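# For every (x, y) block: pad the contributing images to bsize, shift them onto
# the block grid, and normalise the weighted sum by the sum of the shifted weight masks.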
for i,x in enumerate(xedges):
temp_im = []
for j,y in enumerate(yedges):
mask_index = total_mask[i,j]
if np.count_nonzero(mask_index) > 0:
locdata = normalised_data[mask_index]
locdata = da.pad(locdata,
pad_width=((0, 0),
(0, bsize - locdata.shape[1]),
(0, bsize - locdata.shape[2])),
mode='constant')
locdata = locdata.rechunk({0:cs, 1:-1, 2:-1})
shifts = da.from_array([x, y] - pc.T[mask_index], chunks=(cs, -1))
image_sum = shift_images(locdata,
shifts, 1).sum(axis=0)
image_weight = shift_images(da.stack([e_mask] * mask_index.sum()).rechunk({0: cs}),
shifts, 1).sum(axis=0)
normed = (image_sum / image_weight)
temp_im.append(normed)
else:
temp_im.append(da.full((bsize,bsize),
fill_value=np.nan,
chunks=(-1, -1)))
im_list.append(temp_im)
ims.append(da.block(im_list).to_zarr(os.path.join(folder, name , f'results.zarr'),
compute=False, overwrite=True))
# -
# Actual computation
for im in ims:
t = time.time()
da.compute(im)
print(f'{time.time() - t:.1f}s')
# ## Reload image from disk and display
im = da.from_zarr(os.path.join(folder, name , f'results.zarr'))
small = trim_nans(da.coarsen(np.mean, im, axes={0:2,1:2}).compute())
small = small / np.nanmax(small, axis=(0,1), keepdims=True)
plt.figure(figsize=[25,22])
plt.imshow(small, vmax=np.nanmax(small), cmap='gray')
# ## Saving data
final_image = np.nan_to_num(trim_nans(im).compute(),
nan=np.nan,
posinf=np.nan,
neginf=np.nan)
final_image = final_image - np.nanmin(final_image, axis=(0,1))
final_image = final_image / np.nanmax(final_image, axis=(0,1))
tifimage = (final_image*(2**16 - 1)).T.astype(np.uint16)
plt.imshow(tifimage.squeeze())
# Save tif
from skimage.io import imsave
output_fname = 'stitch_'+output.attrs["scriptversion"]+'_'+output.attrs["timestamp"]+f'_sobel_{sigma}_bw_{bandwidth}'
imsave(os.path.join(folder, name, output_fname+'.tif'), tifimage)
# Optionally: save full precision float image to npy
np.save(os.path.join(folder, name, output_fname+'.npy'), final_image)
# Clean up results.zarr
import shutil
shutil.rmtree(os.path.join(folder, name, f'results.zarr'))
| 42.232334
| 434
| 0.64183
|
1fbba980f6325e2d704620cc505fec7050f691ef
| 4,372
|
py
|
Python
|
aether/sdk/health/tests/test_views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | 1
|
2020-05-04T21:05:11.000Z
|
2020-05-04T21:05:11.000Z
|
aether/sdk/health/tests/test_views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | 3
|
2019-09-30T15:45:43.000Z
|
2020-04-29T08:12:37.000Z
|
aether/sdk/health/tests/test_views.py
|
eHealthAfrica/aether-django-sdk-library
|
fc371af89bfed155d465049320f32bf43860d001
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from django.db.utils import OperationalError
from django.urls import reverse
from rest_framework import status
from aether.sdk.tests import AetherTestCase
class ViewsTest(AetherTestCase):
def test__health(self, *args):
response = self.client.get(reverse('health'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.content.decode(), '')
def test__check_db_ok(self, *args):
response = self.client.get(reverse('check-db'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode(),
'Brought to you by eHealth Africa - good tech for hard places',
)
@mock.patch('aether.sdk.health.views.check_db_connection', return_value=False)
def test__check_db_down(self, *args):
response = self.client.get(reverse('check-db'))
self.assertEqual(response.status_code, 500)
self.assertEqual(
response.content.decode(),
'Always Look on the Bright Side of Life!!!',
)
@mock.patch('aether.sdk.health.utils.connection.cursor', side_effect=OperationalError)
def test__check_db__operational_error(self, *args):
response = self.client.get(reverse('check-db'))
self.assertEqual(response.status_code, 500)
self.assertEqual(
response.content.decode(),
'Always Look on the Bright Side of Life!!!',
)
@mock.patch('aether.sdk.health.utils.connection.cursor', side_effect=RuntimeError)
def test__check_db__another_error(self, *args):
response = self.client.get(reverse('check-db'))
self.assertEqual(response.status_code, 500)
self.assertEqual(
response.content.decode(),
'Always Look on the Bright Side of Life!!!',
)
def test__check_app(self, *args):
response = self.client.get(reverse('check-app'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
app_status = response.json()
self.assertEqual(app_status['app_name'], 'eha-test')
self.assertNotEqual(app_status['app_version'], '#.#.#')
self.assertNotEqual(app_status['app_revision'], '---')
def test__check_external_app__missing_app(self):
# "my-app" is not an external app
url = reverse('check-external', kwargs={'name': 'my-app'})
response = self.client.get(url)
self.assertEqual(response.status_code, 500)
self.assertEqual(
response.content.decode(),
'Always Look on the Bright Side of Life!!!',
)
def test__check_external_app__error(self):
# "app-1" is an external app
url = reverse('check-external', kwargs={'name': 'app-1'})
with mock.patch('aether.sdk.health.views.check_external_app', return_value=False):
response = self.client.get(url)
self.assertEqual(response.status_code, 500)
self.assertEqual(
response.content.decode(),
'Always Look on the Bright Side of Life!!!',
)
def test__check_external_app__ok(self):
# "app-2" is also an external app
url = reverse('check-external', kwargs={'name': 'app-2'})
with mock.patch('aether.sdk.health.views.check_external_app', return_value=True):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode(),
'Brought to you by eHealth Africa - good tech for hard places',
)
| 39.387387
| 90
| 0.661025
|
3d4b21294ef25e7a7b3ef2a81f43ed61683d28bc
| 1,240
|
py
|
Python
|
test/pytest_cases/test_io.py
|
GuiYuDaniel/CGC_of_Sn
|
c54e4e65a5ecff09d3e4c5fed76bf30b3804fefa
|
[
"MIT"
] | null | null | null |
test/pytest_cases/test_io.py
|
GuiYuDaniel/CGC_of_Sn
|
c54e4e65a5ecff09d3e4c5fed76bf30b3804fefa
|
[
"MIT"
] | null | null | null |
test/pytest_cases/test_io.py
|
GuiYuDaniel/CGC_of_Sn
|
c54e4e65a5ecff09d3e4c5fed76bf30b3804fefa
|
[
"MIT"
] | null | null | null |
# -*- coding:utf8 -*-
"""
Test that the functionality in src/utils/io executes correctly.
"""
import os
import pickle
import pytest
from utils.io import Path, Save, Load, Delete
class TestSave(object):
"""
test class Save functions
"""
def setup_class(self):
self.fake_path = Path._get_full_path(relative_path="fake", base_path_type="test")  # config is not used here, to avoid a circular import
self.file_path = os.path.join(self.fake_path, "test_results", "fake_cgc", "fake_rst.pkl")
self.test_data = {"peace": "love"}
if os.path.exists(self.file_path):
os.remove(self.file_path)
def teardown_class(self):
if os.path.exists(self.file_path):
os.remove(self.file_path)
def test_pickle_save_load_and_delete(self):
# save
Save.save_pickle(self.test_data, self.file_path)
assert os.path.exists(self.file_path)
with open(self.file_path, "rb") as f:
data = pickle.load(f)
assert data == self.test_data
# load
flag, data = Load.load_pickle(self.file_path)
assert flag
assert data == self.test_data
# delete
flag, msg = Delete.delete_pickle(self.file_path)
assert flag
assert not os.path.exists(self.file_path)
| 28.181818
| 110
| 0.633871
|
bdb1d535f5dc466e546d1688b048ced8b16daf10
| 7,440
|
py
|
Python
|
src/test/tests/databases/mili.py
|
cstatz/visit
|
f352f3984fa77392e81acbaa6943778a779f0435
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/tests/databases/mili.py
|
cstatz/visit
|
f352f3984fa77392e81acbaa6943778a779f0435
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/tests/databases/mili.py
|
cstatz/visit
|
f352f3984fa77392e81acbaa6943778a779f0435
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: mili.py
#
# Tests: mesh - 3D unstructured, multi-domain
# plots - Pseudocolor, material, vector, tensor, label
#
#
# Programmer: Alister Maguire
# Date: May 22, 2019
#
# Modifications:
#
# Alister Maguire, Mon Aug 5 13:02:05 MST 2019
# Added a test that handles .mili files containing integers in
# scientific notation.
#
# Alister Maguire, Thu Dec 19 13:40:07 PST 2019
# Added a test to make sure correct subrecord offsets are used.
#
# ----------------------------------------------------------------------------
RequiredDatabasePlugin("Mili")
single_domain_path = data_path("mili_test_data/single_proc/")
multi_domain_path = data_path("mili_test_data/multi_proc/")
def TestComponentVis():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Pseudocolor", "Primal/Shared/edrate")
DrawPlots()
Test("mili_brick_comp")
ChangeActivePlotsVar("Primal/beam/svec/svec_x")
Test("mili_beam_comp")
ChangeActivePlotsVar("Primal/node/nodacc/ax")
Test("mili_nodacc_comp")
DeleteAllPlots()
def TestSharedElementSets():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Pseudocolor", "Primal/Shared/strain/exy")
DrawPlots()
Test("mili_shared_es_01")
ChangeActivePlotsVar("Primal/Shared/edrate")
Test("mili_shared_es_02")
ChangeActivePlotsVar("Primal/Shared/stress/sy")
Test("mili_shared_es_03")
DeleteAllPlots()
def TestNonSharedElementSets():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
#
# eps is a section of an element set that is only
# defined on beams.
#
AddPlot("Pseudocolor", "Primal/beam/eps")
DrawPlots()
Test("mili_non_shared_es_01")
DeleteAllPlots()
def TestMaterialVar():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Pseudocolor", "Primal/mat/matcgy")
DrawPlots()
Test("mili_mat_var_01")
ChangeActivePlotsVar("Primal/mat/matke")
Test("mili_mat_var_02")
DeleteAllPlots()
def TestTensors():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Tensor", "Primal/Shared/stress")
DrawPlots()
Test("mili_tensors_01")
ChangeActivePlotsVar("Primal/Shared/strain")
Test("mili_tensors_02")
DeleteAllPlots()
def TestVectors():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Vector", "Primal/node/nodpos")
DrawPlots()
Test("mili_vectors_01")
ChangeActivePlotsVar("Primal/shell/bend")
Test("mili_vectors_02")
ChangeActivePlotsVar("Primal/beam/svec")
Test("mili_vectors_03")
DeleteAllPlots()
def TestSandMesh():
OpenDatabase(single_domain_path + "/m_plot.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(101)
#
# First, let's look at the sand variable on a non-sanded mesh.
# It should be well structured.
#
AddPlot("Mesh", "mesh1")
AddPlot("Pseudocolor", "Primal/Shared/sand")
DrawPlots()
Test("mili_sand_mesh_01")
DeleteAllPlots()
#
# Now let's view the sand mesh. It's a mess.
#
AddPlot("Mesh", "sand_mesh1")
AddPlot("Pseudocolor", "Primal/Shared/sand")
DrawPlots()
Test("mili_sand_mesh_02")
#
# Now let's look at sand in its sanded state.
#
ChangeActivePlotsVar("sand_mesh/Primal/Shared/sand")
DrawPlots()
Test("mili_sand_mesh_03")
#
# We need to make sure that other variables can also be
# viewed in their sanded state.
#
ChangeActivePlotsVar("sand_mesh/Primal/shell/stress_mid/sx")
Test("mili_sand_mesh_04")
DeleteAllPlots()
def TestMaterials():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(70)
AddPlot("FilledBoundary", "materials1(mesh1)")
DrawPlots()
Test("mili_materials_00")
DeleteAllPlots()
def TestMultiDomain():
OpenDatabase(multi_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Pseudocolor", "Primal/Shared/strain/exy")
DrawPlots()
Test("mili_multi_dom_01")
ChangeActivePlotsVar("Primal/Shared/stress/sz")
Test("mili_multi_dom_02")
DeleteAllPlots()
def TestParticles():
OpenDatabase(single_domain_path + "/sslide14ball_l.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
AddPlot("Pseudocolor", "Primal/particle/stress/sxy")
DrawPlots()
Test("mili_particle_01")
DeleteAllPlots()
def TestStaticNodes():
OpenDatabase(single_domain_path + "/m1_plot.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
AddPlot("Mesh", "mesh1")
AddPlot("Pseudocolor", "Primal/node/temp")
SetTimeSliderState(10)
DrawPlots()
Test("mili_static_nodes_01")
DeleteAllPlots()
def TestLabels():
OpenDatabase(single_domain_path + "/d3samp6.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
SetTimeSliderState(90)
AddPlot("Pseudocolor", "Primal/Shared/edrate")
AddPlot("Label", "OriginalZoneLabels")
DrawPlots()
Test("mili_zone_labels_01")
DeleteAllPlots()
AddPlot("Pseudocolor", "Primal/Shared/edrate")
AddPlot("Label", "OriginalNodeLabels")
DrawPlots()
Test("mili_node_labels_01")
DeleteAllPlots()
def TestSciNotation():
#
# Some .mili files contain integers in scientific notation.
# These need to be handled appropriately.
#
OpenDatabase(single_domain_path + "/HexModel1.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
AddPlot("Pseudocolor", "Primal/brick/stress/sx")
DrawPlots()
Test("mili_from_sci_not")
DeleteAllPlots()
def TestMultiSubrecRead():
#
# This tests a bug fix that occurred when loading variables
# that span several subrecords at different offsets.
#
OpenDatabase(single_domain_path + "/test4_0.15.plt.mili")
v = GetView3D()
v.viewNormal = (0.9, 0.35, -0.88)
SetView3D(v)
AddPlot("Pseudocolor", "Primal/brick/stress/sx")
DrawPlots()
Test("mili_subrec_offset")
DeleteAllPlots()
def Main():
TestComponentVis()
TestNonSharedElementSets()
TestSharedElementSets()
TestMaterialVar()
TestTensors()
TestVectors()
TestSandMesh()
TestMaterials()
TestMultiDomain()
TestParticles()
TestStaticNodes()
TestLabels()
TestSciNotation()
TestMultiSubrecRead()
Main()
Exit()
| 24.8
| 78
| 0.644758
|
830472e4e7890f8993705e69ff0cfe75d06fd384
| 34,914
|
py
|
Python
|
Testing/Python/TestCoarsenModel.py
|
Numerics88/vtkbone
|
5a6ab2870679e9e7ea51926c34911607b9d85235
|
[
"MIT"
] | 3
|
2017-04-04T04:59:22.000Z
|
2022-03-13T11:22:40.000Z
|
Testing/Python/TestCoarsenModel.py
|
Numerics88/vtkbone
|
5a6ab2870679e9e7ea51926c34911607b9d85235
|
[
"MIT"
] | 5
|
2017-04-06T19:46:39.000Z
|
2019-12-11T23:41:41.000Z
|
Testing/Python/TestCoarsenModel.py
|
Numerics88/vtkbone
|
5a6ab2870679e9e7ea51926c34911607b9d85235
|
[
"MIT"
] | 2
|
2017-04-29T20:54:57.000Z
|
2017-04-29T22:28:10.000Z
|
from __future__ import division
import unittest
from numpy.core import *
import numpy
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import vtkbone
def stress_strain_isotropic (E, nu):
s = nu/(1-nu)
t = (1-2*nu)/(2*(1-nu))
D = array ([[ 1, s, s, 0, 0, 0],
[ s, 1, s, 0, 0, 0],
[ s, s, 1, 0, 0, 0],
[ 0, 0, 0, t, 0, 0],
[ 0, 0, 0, 0, t, 0],
[ 0, 0, 0, 0, 0, t]]
)
D *= E*(1-nu)/((1+nu)*(1-2*nu))
return D
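# Added sanity check (illustration only): for nu = 0 this matrix should reduce to
# diag(E, E, E, E/2, E/2, E/2).
assert numpy.allclose(stress_strain_isotropic(1.0, 0.0),
                      numpy.diag([1.0, 1.0, 1.0, 0.5, 0.5, 0.5]))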
def stress_strain_orthotropic (E, nu, G):
nu12 = nu[0]
nu20 = nu[1]
nu01 = nu[2]
nu21 = (E[2]/E[1])*nu12
nu02 = (E[0]/E[2])*nu20
nu10 = (E[1]/E[0])*nu01
D = zeros((6,6), float)
D[:3,:3] = array ([[ 1/E[0], -nu10/E[1], -nu20/E[2]],
[ -nu01/E[0], 1/E[1], -nu21/E[2]],
[ -nu02/E[0], -nu12/E[1], 1/E[2]]])
D[3,3] = 1/G[0]
D[4,4] = 1/G[1]
D[5,5] = 1/G[2]
return numpy.linalg.inv(D)
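# Added cross-check (illustration only): with equal moduli and consistent shear moduli
# G = E/(2*(1+nu)), the orthotropic helper should reduce to the isotropic one.
assert numpy.allclose(
    stress_strain_orthotropic((100.0, 100.0, 100.0), (0.3, 0.3, 0.3),
                              (100.0/2.6, 100.0/2.6, 100.0/2.6)),
    stress_strain_isotropic(100.0, 0.3))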
def upper_triangular_to_square (ut):
s = zeros((6,6), dtype=float)
s[0,0] = ut[0]
s[1,:2] = ut[1:3]
s[:2,1] = ut[1:3]
s[2,:3] = ut[3:6]
s[:3,2] = ut[3:6]
s[3,:4] = ut[6:10]
s[:4,3] = ut[6:10]
s[4,:5] = ut[10:15]
s[:5,4] = ut[10:15]
s[5,:6] = ut[15:21]
s[:6,5] = ut[15:21]
return s
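# Added illustration of the packing convention assumed above: the 21 entries fill
# the symmetric 6x6 matrix row by row, diagonal included.
_ut_demo = arange(21, dtype=float)
_sq_demo = upper_triangular_to_square(_ut_demo)
assert (_sq_demo == _sq_demo.T).all()  # symmetric result
assert _sq_demo[0, 0] == 0 and _sq_demo[5, 5] == 20  # row-wise packing
del _ut_demo, _sq_demo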
class TestCoarsenModel (unittest.TestCase):
def test_iso_x_gradient (self):
# Create 5x5x5 cube image
cellmap = arange (1, (5*5*5)+1, dtype=int16)
cellmap.shape = (5,5,5)
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((6,6,6)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials with gradient in E along x.
E = zeros((5,5,5),float32)
E[:,:,:] = arange(1.0,6.0,dtype=float32)
E_vtk = numpy_to_vtk (E.flatten(), deep=1)
nu = 0.3*ones(125,float32)
nu_vtk = numpy_to_vtk (nu, deep=1)
material = vtkbone.vtkboneLinearIsotropicMaterialArray()
material.SetYoungsModulus(E_vtk)
material.SetPoissonsRatio(nu_vtk)
self.assertEqual (material.GetSize(), 125)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (1, material)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 12.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 13.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 14.5)
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 3**3)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
coarse_E.shape = (3,3,3)
self.assertTrue (alltrue(abs(coarse_E[:,:,0] - 1.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[:,:,1] - 3.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[:,:,2] - 5) < 1E-6))
def test_iso_y_gradient (self):
# Create 5x5x5 cube image
cellmap = arange (1, (5*5*5)+1, dtype=int16)
cellmap.shape = (5,5,5)
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((6,6,6)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials with gradient in E along y.
E = zeros((5,5,5),float32)
k,j,i = numpy.mgrid[0:5,0:5,0:5]
E[k,j,i] = 1+j
E_vtk = numpy_to_vtk (E.flatten(), deep=1)
nu = 0.3*ones(125,float32)
nu_vtk = numpy_to_vtk (nu, deep=1)
material = vtkbone.vtkboneLinearIsotropicMaterialArray()
material.SetYoungsModulus(E_vtk)
material.SetPoissonsRatio(nu_vtk)
self.assertEqual (material.GetSize(), 125)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (1, material)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 12.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 13.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 14.5)
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 3**3)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
coarse_E.shape = (3,3,3)
self.assertTrue (alltrue(abs(coarse_E[:,0,:] - 1.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[:,1,:] - 3.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[:,2,:] - 5) < 1E-6))
def test_iso_z_gradient (self):
# Create 5x5x5 cube image
cellmap = arange (1, (5*5*5)+1, dtype=int16)
cellmap.shape = (5,5,5)
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((6,6,6)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials with gradient in E along z.
E = zeros((5,5,5),float32)
k,j,i = numpy.mgrid[0:5,0:5,0:5]
E[k,j,i] = 1+k
E_vtk = numpy_to_vtk (E.flatten(), deep=1)
nu = 0.3*ones(125,float32)
nu_vtk = numpy_to_vtk (nu, deep=1)
material = vtkbone.vtkboneLinearIsotropicMaterialArray()
material.SetYoungsModulus(E_vtk)
material.SetPoissonsRatio(nu_vtk)
self.assertEqual (material.GetSize(), 125)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (1, material)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 12.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 13.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 14.5)
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 3**3)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
coarse_E.shape = (3,3,3)
self.assertTrue (alltrue(abs(coarse_E[0,:,:] - 1.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[1,:,:] - 3.5) < 1E-6))
self.assertTrue (alltrue(abs(coarse_E[2,:,:] - 5) < 1E-6))
def test_cube_with_holes_linear (self):
# Create 4x4x4 cube image
cellmap = ones((4,4,4), int16) # z,y,x order
# Punch some holes in such a way that the first coarse element
# has one hole, the second two, etc. Make sure 1,1,1 is the
# last to be knocked out, to avoid reducing the bounds of the
# FE model.
offsets = array([[0,2,0,2,0,2,0,2],
[0,0,2,2,0,0,2,2],
[0,0,0,0,2,2,2,2]])
cellmap[1+offsets[2],1+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],0+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],0+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],1+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],0+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],0+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],1+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],1+offsets[1],1+offsets[0]] = 0
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((5,5,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate material.
material = vtkbone.vtkboneLinearIsotropicMaterial()
material.SetName("linear_iso_material")
material.SetYoungsModulus(6000)
material.SetPoissonsRatio(0.3)
mt_generator = vtkbone.vtkboneGenerateHomogeneousMaterialTable()
mt_generator.SetMaterial(material)
mt_generator.SetMaterialIdList(image.GetCellData().GetScalars())
mt_generator.Update()
material_table = mt_generator.GetOutput()
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 9.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 10.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check materials: material array with 8 possible output materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 8)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
self.assertAlmostEqual (coarse_E[0], 6000*1/8)
self.assertAlmostEqual (coarse_E[1], 6000*2/8)
self.assertAlmostEqual (coarse_E[2], 6000*3/8)
self.assertAlmostEqual (coarse_E[3], 6000*4/8)
self.assertAlmostEqual (coarse_E[4], 6000*5/8)
self.assertAlmostEqual (coarse_E[5], 6000*6/8)
self.assertAlmostEqual (coarse_E[6], 6000*7/8)
self.assertAlmostEqual (coarse_E[7], 6000)
coarse_nu = vtk_to_numpy (coarse_material.GetPoissonsRatio())
self.assertAlmostEqual (coarse_nu[0], 0.3)
self.assertAlmostEqual (coarse_nu[1], 0.3)
self.assertAlmostEqual (coarse_nu[2], 0.3)
self.assertAlmostEqual (coarse_nu[3], 0.3)
self.assertAlmostEqual (coarse_nu[4], 0.3)
self.assertAlmostEqual (coarse_nu[5], 0.3)
self.assertAlmostEqual (coarse_nu[6], 0.3)
# Check cell scalars: point to appropriate material ID
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 7)
self.assertEqual (cell_scalars[0], 7)
self.assertEqual (cell_scalars[1], 6)
self.assertEqual (cell_scalars[2], 5)
self.assertEqual (cell_scalars[3], 4)
self.assertEqual (cell_scalars[4], 3)
self.assertEqual (cell_scalars[5], 2)
self.assertEqual (cell_scalars[6], 1)
def test_cube_with_holes_homminga_density (self):
# Create 4x4x4 cube image
cellmap = ones((4,4,4), int16) # z,y,x order
# Punch some holes in such a way that the first coarse element
# has one hole, the second two, etc. Make sure 1,1,1 is the
# last to be knocked out, to avoid reducing the bounds of the
# FE model.
offsets = array([[0,2,0,2,0,2,0,2],
[0,0,2,2,0,0,2,2],
[0,0,0,0,2,2,2,2]])
cellmap[1+offsets[2],1+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],0+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],0+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],1+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],0+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],0+offsets[1],1+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[0+offsets[2],1+offsets[1],0+offsets[0]] = 0
offsets = offsets[:,1:]
cellmap[1+offsets[2],1+offsets[1],1+offsets[0]] = 0
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((5,5,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate material.
material = vtkbone.vtkboneLinearIsotropicMaterial()
material.SetName("linear_iso_material")
material.SetYoungsModulus(6000)
material.SetPoissonsRatio(0.3)
mt_generator = vtkbone.vtkboneGenerateHomogeneousMaterialTable()
mt_generator.SetMaterial(material)
mt_generator.SetMaterialIdList(image.GetCellData().GetScalars())
mt_generator.Update()
material_table = mt_generator.GetOutput()
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.HOMMINGA_DENSITY)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 9.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 10.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check materials: material array with 8 possible output materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 8)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
self.assertAlmostEqual (coarse_E[0], 6000*(1/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[1], 6000*(2/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[2], 6000*(3/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[3], 6000*(4/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[4], 6000*(5/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[5], 6000*(6/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[6], 6000*(7/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[7], 6000)
coarse_nu = vtk_to_numpy (coarse_material.GetPoissonsRatio())
self.assertAlmostEqual (coarse_nu[0], 0.3)
self.assertAlmostEqual (coarse_nu[1], 0.3)
self.assertAlmostEqual (coarse_nu[2], 0.3)
self.assertAlmostEqual (coarse_nu[3], 0.3)
self.assertAlmostEqual (coarse_nu[4], 0.3)
self.assertAlmostEqual (coarse_nu[5], 0.3)
self.assertAlmostEqual (coarse_nu[6], 0.3)
# Check cell scalars: point to appropriate material ID
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 7)
self.assertEqual (cell_scalars[0], 7)
self.assertEqual (cell_scalars[1], 6)
self.assertEqual (cell_scalars[2], 5)
self.assertEqual (cell_scalars[3], 4)
self.assertEqual (cell_scalars[4], 3)
self.assertEqual (cell_scalars[5], 2)
self.assertEqual (cell_scalars[6], 1)
def test_two_isotropic_materials_linear (self):
# Create 2x2x4 cube image
cellmap = zeros((4,2,2), int16) # z,y,x order
# Bottom output cell has 6 input cells, of which 4 are materialA (ID 10) and 2 are materialB (ID 12)
cellmap[0,0,1] = 10
cellmap[0,1,1] = 12
cellmap[1,0,0] = 12
cellmap[1,0,1] = 10
cellmap[1,1,0] = 10
cellmap[1,1,1] = 10
# Top output cell has 5 input cells, of which 4 are materialA (ID 10) and 1 is materialB (ID 12)
cellmap[2,0,0] = 10
cellmap[2,1,1] = 10
cellmap[2,1,0] = 10
cellmap[3,0,0] = 12
cellmap[3,1,0] = 10
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((3,3,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials.
materialA = vtkbone.vtkboneLinearIsotropicMaterial()
materialA.SetName("materialA")
materialA.SetYoungsModulus(6000)
materialA.SetPoissonsRatio(0.3)
materialB = vtkbone.vtkboneLinearIsotropicMaterial()
materialB.SetName("materialB")
materialB.SetYoungsModulus(4000)
materialB.SetPoissonsRatio(0.4)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (10, materialA)
material_table.AddMaterial (12, materialB)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 6.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 7.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check cell scalars: sequence
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 2)
self.assertEqual (cell_scalars[0], 1)
self.assertEqual (cell_scalars[1], 2)
# Check materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 2)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
self.assertAlmostEqual (coarse_E[0], (4*6000 + 2*4000)/8)
self.assertAlmostEqual (coarse_E[1], (4*6000 + 1*4000)/8)
coarse_nu = vtk_to_numpy (coarse_material.GetPoissonsRatio())
self.assertAlmostEqual (coarse_nu[0], (4*0.3 + 2*0.4)/6)
self.assertAlmostEqual (coarse_nu[1], (4*0.3 + 1*0.4)/5)
def test_two_isotropic_materials_homminga_density (self):
# Create 2x2x4 cube image
cellmap = zeros((4,2,2), int16) # z,y,x order
# Bottom output cell has 6 input cells, of which 4 are materialA (ID 10) and 2 are materialB (ID 12)
cellmap[0,0,1] = 10
cellmap[0,1,1] = 12
cellmap[1,0,0] = 12
cellmap[1,0,1] = 10
cellmap[1,1,0] = 10
cellmap[1,1,1] = 10
# Top output cell has 5 input cells, of which 4 are materialA (ID 10) and 1 is materialB (ID 12)
cellmap[2,0,0] = 10
cellmap[2,1,1] = 10
cellmap[2,1,0] = 10
cellmap[3,0,0] = 12
cellmap[3,1,0] = 10
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((3,3,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials.
materialA = vtkbone.vtkboneLinearIsotropicMaterial()
materialA.SetName("materialA")
materialA.SetYoungsModulus(6000)
materialA.SetPoissonsRatio(0.3)
materialB = vtkbone.vtkboneLinearIsotropicMaterial()
materialB.SetName("materialB")
materialB.SetYoungsModulus(4000)
materialB.SetPoissonsRatio(0.4)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (10, materialA)
material_table.AddMaterial (12, materialB)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.HOMMINGA_DENSITY)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 6.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 7.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check cell scalars: sequence
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 2)
self.assertEqual (cell_scalars[0], 1)
self.assertEqual (cell_scalars[1], 2)
# Check materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearIsotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 2)
coarse_E = vtk_to_numpy (coarse_material.GetYoungsModulus())
self.assertAlmostEqual (coarse_E[0], ((4*6000**(1/1.7) + 2*4000**(1/1.7))/8)**1.7, delta=1E-2)
self.assertAlmostEqual (coarse_E[1], ((4*6000**(1/1.7) + 1*4000**(1/1.7))/8)**1.7, delta=1E-2)
coarse_nu = vtk_to_numpy (coarse_material.GetPoissonsRatio())
self.assertAlmostEqual (coarse_nu[0], (4*0.3 + 2*0.4)/6)
self.assertAlmostEqual (coarse_nu[1], (4*0.3 + 1*0.4)/5)
def test_mixed_materials_linear (self):
# Create 2x2x4 cube image
cellmap = zeros((4,2,2), int16) # z,y,x order
# Bottom output cell has 6 input cells, of which:
# 3 are materialA (ID 10)
# 2 material B (ID 12)
# 1 is material C (ID 15)
cellmap[0,0,1] = 10
cellmap[0,1,1] = 12
cellmap[1,0,0] = 12
cellmap[1,0,1] = 15
cellmap[1,1,0] = 10
cellmap[1,1,1] = 10
# Top output cell has 5 input cells, of which:
# 2 are materialA (ID 10)
# 1 is material B (ID 12)
# 2 are materialC (ID 15)
cellmap[2,0,0] = 15
cellmap[2,1,1] = 10
cellmap[2,1,0] = 10
cellmap[3,0,0] = 12
cellmap[3,1,0] = 15
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((3,3,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials.
materialA = vtkbone.vtkboneLinearIsotropicMaterial()
materialA.SetName("materialA")
materialA.SetYoungsModulus(6000)
materialA.SetPoissonsRatio(0.3)
DA = stress_strain_isotropic (6000.0, 0.3)
materialB = vtkbone.vtkboneLinearOrthotropicMaterial()
materialB.SetYoungsModulusX(1000)
materialB.SetYoungsModulusY(1100)
materialB.SetYoungsModulusZ(1200)
materialB.SetPoissonsRatioYZ(0.25)
materialB.SetPoissonsRatioZX(0.3)
materialB.SetPoissonsRatioXY(0.2)
# These values are not necessarily consistent
GYZ = 1000/(2*(1+0.25))
GZX = 1100/(2*(1+0.3))
GXY = 1200/(2*(1+0.2))
materialB.SetShearModulusYZ(GYZ)
materialB.SetShearModulusZX(GZX)
materialB.SetShearModulusXY(GXY)
DB = stress_strain_orthotropic ((1000.0, 1100.0, 1200.0),
( 0.25, 0.3, 0.2),
( GYZ, GZX, GXY))
materialC = vtkbone.vtkboneLinearAnisotropicMaterial()
DC = array((
(1571.653, 540.033, 513.822, 7.53 , -121.22 , -57.959),
( 540.033, 2029.046, 469.974, 78.591, -53.69 , -50.673),
( 513.822, 469.974, 1803.998, 20.377, -57.014, -15.761),
( 7.53 , 78.591, 20.377, 734.405, -23.127, -36.557),
(-121.22 , -53.69 , -57.014, -23.127, 627.396, 13.969),
( -57.959, -50.673, -15.761, -36.557, 13.969, 745.749)))
DC_vtk = numpy_to_vtk (DC, array_type=vtk.VTK_FLOAT)
materialC.SetStressStrainMatrix(DC_vtk)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (10, materialA)
material_table.AddMaterial (12, materialB)
material_table.AddMaterial (15, materialC)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.LINEAR)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 6.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 7.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check cell scalars: sequence
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 2)
self.assertEqual (cell_scalars[0], 1)
self.assertEqual (cell_scalars[1], 2)
# Check materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearAnisotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 2)
ut_vtk = coarse_material.GetStressStrainMatrixUpperTriangular()
ut = vtk_to_numpy (ut_vtk)
self.assertEqual (ut.shape, (2,21))
D1 = upper_triangular_to_square (ut[0])
D1_ref = (3*DA + 2*DB + 1*DC)/8
self.assertTrue (alltrue(abs(D1-D1_ref) < 1E-3))
D2 = upper_triangular_to_square (ut[1])
D2_ref = (2*DA + 1*DB + 2*DC)/8
self.assertTrue (alltrue(abs(D2-D2_ref) < 1E-3))
def test_mixed_materials_homminga_density (self):
# Create 2x2x4 cube image
cellmap = zeros((4,2,2), int16) # z,y,x order
# Bottom output cell has 6 input cells, of which:
# 3 are materialA (ID 10)
# 2 material B (ID 12)
# 1 is material C (ID 15)
cellmap[0,0,1] = 10
cellmap[0,1,1] = 12
cellmap[1,0,0] = 12
cellmap[1,0,1] = 15
cellmap[1,1,0] = 10
cellmap[1,1,1] = 10
# Top output cell has 5 input cells, of which:
# 2 are materialA (ID 10)
# 1 is material B (ID 12)
# 2 are materialC (ID 15)
cellmap[2,0,0] = 15
cellmap[2,1,1] = 10
cellmap[2,1,0] = 10
cellmap[3,0,0] = 12
cellmap[3,1,0] = 15
cellmap_flat = cellmap.flatten().copy()
cellmap_vtk = numpy_to_vtk(cellmap_flat, deep=1)
image = vtk.vtkImageData()
image.SetDimensions((3,3,5)) # x,y,z order
image.SetSpacing(1.5,1.5,1.5)
image.SetOrigin(3.5,4.5,5.5)
image.GetCellData().SetScalars(cellmap_vtk)
# Convert to mesh
geometry_generator = vtkbone.vtkboneImageToMesh()
geometry_generator.SetInputData(image)
geometry_generator.Update()
geometry = geometry_generator.GetOutput()
# Generate materials.
materialA = vtkbone.vtkboneLinearIsotropicMaterial()
materialA.SetName("materialA")
materialA.SetYoungsModulus(6000)
materialA.SetPoissonsRatio(0.3)
DA = stress_strain_isotropic (6000.0, 0.3)
materialB = vtkbone.vtkboneLinearOrthotropicMaterial()
materialB.SetYoungsModulusX(1000)
materialB.SetYoungsModulusY(1100)
materialB.SetYoungsModulusZ(1200)
materialB.SetPoissonsRatioYZ(0.25)
materialB.SetPoissonsRatioZX(0.3)
materialB.SetPoissonsRatioXY(0.2)
# These values are not necessarily consistent
GYZ = 1000/(2*(1+0.25))
GZX = 1100/(2*(1+0.3))
GXY = 1200/(2*(1+0.2))
materialB.SetShearModulusYZ(GYZ)
materialB.SetShearModulusZX(GZX)
materialB.SetShearModulusXY(GXY)
DB = stress_strain_orthotropic ((1000.0, 1100.0, 1200.0),
( 0.25, 0.3, 0.2),
( GYZ, GZX, GXY))
materialC = vtkbone.vtkboneLinearAnisotropicMaterial()
DC = array((
(1571.653, 540.033, 513.822, 7.53 , -121.22 , -57.959),
( 540.033, 2029.046, 469.974, 78.591, -53.69 , -50.673),
( 513.822, 469.974, 1803.998, 20.377, -57.014, -15.761),
( 7.53 , 78.591, 20.377, 734.405, -23.127, -36.557),
(-121.22 , -53.69 , -57.014, -23.127, 627.396, 13.969),
( -57.959, -50.673, -15.761, -36.557, 13.969, 745.749)))
DC_vtk = numpy_to_vtk (DC, array_type=vtk.VTK_FLOAT)
materialC.SetStressStrainMatrix(DC_vtk)
material_table = vtkbone.vtkboneMaterialTable()
material_table.AddMaterial (10, materialA)
material_table.AddMaterial (12, materialB)
material_table.AddMaterial (15, materialC)
# Generate model
generator = vtkbone.vtkboneApplyCompressionTest()
generator.SetInputData(0, geometry)
generator.SetInputData(1, material_table)
generator.Update()
model = generator.GetOutput()
# Apply coarsener
coarsener = vtkbone.vtkboneCoarsenModel()
coarsener.SetInputData (model)
coarsener.SetMaterialAveragingMethod (vtkbone.vtkboneCoarsenModel.HOMMINGA_DENSITY)
coarsener.Update()
coarse_model = coarsener.GetOutput()
# Check bounds
bounds = coarse_model.GetBounds()
self.assertAlmostEqual (bounds[0], 3.5)
self.assertAlmostEqual (bounds[1], 6.5)
self.assertAlmostEqual (bounds[2], 4.5)
self.assertAlmostEqual (bounds[3], 7.5)
self.assertAlmostEqual (bounds[4], 5.5)
self.assertAlmostEqual (bounds[5], 11.5)
# Check cell scalars: sequence
cell_scalars = vtk_to_numpy (coarse_model.GetCellData().GetScalars())
self.assertEqual (len(cell_scalars), 2)
self.assertEqual (cell_scalars[0], 1)
self.assertEqual (cell_scalars[1], 2)
# Check materials
coarse_material = coarse_model.GetMaterialTable().GetMaterial(1)
self.assertTrue (isinstance (coarse_material, vtkbone.vtkboneLinearAnisotropicMaterialArray))
self.assertEqual (coarse_material.GetSize(), 2)
ut_vtk = coarse_material.GetStressStrainMatrixUpperTriangular()
ut = vtk_to_numpy (ut_vtk)
self.assertEqual (ut.shape, (2,21))
D1 = upper_triangular_to_square (ut[0])
D1_ref = zeros((6,6), dtype=float)
for i in range(6):
for j in range (6):
x = 0.0
a = DA[i,j]
b = DB[i,j]
c = DC[i,j]
if a > 0:
x += 3*a**(1/1.7)
if a < 0:
x -= 3*(-a)**(1/1.7)
if b > 0:
x += 2*b**(1/1.7)
if b < 0:
x -= 2*(-b)**(1/1.7)
if c > 0:
x += 1*c**(1/1.7)
if c < 0:
x -= 1*(-c)**(1/1.7)
x /= 8
if x > 0:
D1_ref[i,j] = x**1.7
if x < 0:
D1_ref[i,j] = -((-x)**1.7)
self.assertTrue (alltrue(abs(D1-D1_ref) < 1E-2))
D2 = upper_triangular_to_square (ut[1])
D2_ref = zeros((6,6), dtype=float)
for i in range(6):
for j in range (6):
x = 0.0
a = DA[i,j]
b = DB[i,j]
c = DC[i,j]
if a > 0:
x += 2*a**(1/1.7)
if a < 0:
x -= 2*(-a)**(1/1.7)
if b > 0:
x += 1*b**(1/1.7)
if b < 0:
x -= 1*(-b)**(1/1.7)
if c > 0:
x += 2*c**(1/1.7)
if c < 0:
x -= 2*(-c)**(1/1.7)
x /= 8
if x > 0:
D2_ref[i,j] = x**1.7
if x < 0:
D2_ref[i,j] = -((-x)**1.7)
self.assertTrue (alltrue(abs(D2-D2_ref) < 1E-2))
if __name__ == '__main__':
unittest.main()
| 38.157377
| 98
| 0.657473
|
68c3975d00615d5d0b1714ac7f54931618cce331
| 4,025
|
py
|
Python
|
plugins/action/external_radius_server_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/external_radius_server_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/external_radius_server_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
ISESDK,
ise_argument_spec,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific to this module
argument_spec.update(dict(
name=dict(type="str"),
id=dict(type="str"),
page=dict(type="int"),
size=dict(type="int"),
))
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
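# Example task usage (illustrative only; the fully qualified module name below is
# inferred from this collection's path and may differ):
#
#   - name: Get external RADIUS server by name
#     cisco.ise.external_radius_server_info:
#       name: my-radius-server
#
#   - name: List all external RADIUS servers (paged)
#     cisco.ise.external_radius_server_info:
#       page: 1
#       size: 100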
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = True
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def get_object(self, params):
new_object = dict(
name=params.get("name"),
id=params.get("id"),
page=params.get("page"),
size=params.get("size"),
)
return new_object
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
id = self._task.args.get("id")
name = self._task.args.get("name")
if id:
response = ise.exec(
family="external_radius_server",
function='get_external_radius_server_by_id',
params=self.get_object(self._task.args)
).response['ExternalRadiusServer']
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if name:
response = ise.exec(
family="external_radius_server",
function='get_external_radius_server_by_name',
params=self.get_object(self._task.args)
).response['ExternalRadiusServer']
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if not name and not id:
response = []
generator = ise.exec(
family="external_radius_server",
function='get_external_radius_server_generator',
params=self.get_object(self._task.args),
)
for item in generator:
tmp_response = item.response['SearchResult']['resources']
if isinstance(tmp_response, list):
response += tmp_response
else:
response.append(tmp_response)
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
| 35.619469
| 128
| 0.624099
|
bfd4f2a03933d8078f8572ed09878b67d5bc9f6f
| 5,601
|
py
|
Python
|
coffeecups/models.py
|
J1bz/ecoloscore
|
68e3e7975c59dcf2db5f050ccea5f65d6f2d8645
|
[
"BSD-3-Clause"
] | null | null | null |
coffeecups/models.py
|
J1bz/ecoloscore
|
68e3e7975c59dcf2db5f050ccea5f65d6f2d8645
|
[
"BSD-3-Clause"
] | null | null | null |
coffeecups/models.py
|
J1bz/ecoloscore
|
68e3e7975c59dcf2db5f050ccea5f65d6f2d8645
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import (Model, ForeignKey, DateTimeField, CharField,
ManyToManyField, IntegerField, TextField)
from django.forms import ModelForm, CharField as formCharField, Textarea
from django.contrib.auth.models import User
from score.models import Score
# TODO: make sure that a user can't be in 2 cup policies
class Take(Model):
"""
A take is a record for a user taking a cup at a given date at a cup
distributor (we don't record where).
"""
user = ForeignKey(User)
date = DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
"""
Depending on the number of taken cups during the last 12 hours,
the player's score is updated with a bonus or a malus referenced in
the administrable cup policy attached to the user.
If a user did not take any cup during a day, a crontab is supposed to
update his score with a bonus referenced in the same user-attached
cup policy.
"""
if not self.pk: # only if take does not already exist
try:
policy = CupPolicy.objects.get(users=self.user)
now = datetime.now()
day = timedelta(hours=12)
# Cups taken in the last working day
taken_cups = Take.objects.filter(date__gte=(now - day),
user=self.user)
taken_cups_number = len(taken_cups)
if taken_cups_number == 0: # it means this take is the first
points = policy.take_of_the_day
else:
points = policy.take_malus
s = Score.objects.create(user=self.user, game='c',
value=points)
s.save()
except ObjectDoesNotExist:
pass
super(Take, self).save(*args, **kwargs)
def __unicode__(self):
return '{} took a cup at {}'.format(self.user, self.date)
class TakeForm(ModelForm):
class Meta:
model = Take
fields = ('user',)
class Throw(Model):
"""
A throw is a record for a user throwing a cup at a given date in a cup
bin (we don't record where).
"""
user = ForeignKey(User)
date = DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
"""
If the number of thrown cups is inferior to the number of taken cups
during the last 12 hours, the player's score is updated with a bonus
referenced in the administrable cup policy attached to the user.
If it is superior, it is still a good thing to throw other people's
cups, but since we don't want users to abuse the system we just
don't record it.
"""
if self.pk:  # if this throw already exists
super(Throw, self).save(*args, **kwargs)
else:
try:
now = datetime.now()
day = timedelta(hours=12)
# Cups taken in the last working day
taken_cups = Take.objects.filter(date__gte=(now - day),
user=self.user)
thrown_cups = Throw.objects.filter(date__gte=(now - day),
user=self.user)
if len(taken_cups) > len(thrown_cups):
policy = CupPolicy.objects.get(users=self.user)
points = policy.throw
s = Score.objects.create(user=self.user, game='c',
value=points)
s.save()
# Throw is not saved if it is just a throw that is not
# part of the game, that is to say if all the cups the user
# has taken have already been thrown.
super(Throw, self).save(*args, **kwargs)
except ObjectDoesNotExist:
pass
def __unicode__(self):
return '{} threw a cup at {}'.format(self.user, self.date)
class ThrowForm(ModelForm):
class Meta:
model = Throw
fields = ('user',)
class CupPolicy(Model):
"""
A cup policy is a configurable object allowing ecoloscore administrators
to change some score bonuses/maluses and to choose which users are
affected by this policy.
"""
name = CharField(max_length=32)
comment = TextField(blank=True)
users = ManyToManyField(User, blank=True)
    # points given at the end of the day if you didn't take any cup during a
    # weekday (should be handled by a cron job)
    no_takes = IntegerField()
    # points given if the cup is the first one a user took this day
    take_of_the_day = IntegerField()
    # points given (should be negative) if a cup has already been taken this day
    take_malus = IntegerField()
    # points given if you threw a cup you took earlier this day
    throw = IntegerField()
def __unicode__(self):
return 'Cup policy: {}'.format(self.name)
class Meta:
verbose_name_plural = 'cup policies'
class CupPolicyForm(ModelForm):
comment = formCharField(required=False, widget=Textarea)
class Meta:
model = CupPolicy
fields = (
'name',
'comment',
'users',
'no_takes',
'take_of_the_day',
'take_malus',
'throw',
)
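# Hypothetical usage sketch (not part of the original app): shows how the
# overridden save() hooks above drive scoring for a user who belongs to a
# CupPolicy. Assumes a configured Django project with this app and the
# `score` app installed; the helper name is illustrative only.
def _example_cup_game_round(user):
    """Record one take and one matching throw for ``user`` and return the
    Score rows created as a side effect."""
    Take.objects.create(user=user)   # first take of the day -> take_of_the_day
    Throw.objects.create(user=user)  # matching throw -> throw bonus
    return Score.objects.filter(user=user, game='c')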
| 31.466292
| 77
| 0.572041
|
71d7f89abadcf35a50c0989f4c464b113e80382a
| 11,569
|
py
|
Python
|
dqns/dqn_interface.py
|
0xsuu/gym-free-DQN
|
564b8f1d0e7ec67a9926d9036fb4c0d8b1385420
|
[
"Apache-2.0"
] | 3
|
2019-03-18T11:21:04.000Z
|
2021-11-17T11:08:12.000Z
|
dqns/dqn_interface.py
|
0xsuu/gym-free-DQN
|
564b8f1d0e7ec67a9926d9036fb4c0d8b1385420
|
[
"Apache-2.0"
] | null | null | null |
dqns/dqn_interface.py
|
0xsuu/gym-free-DQN
|
564b8f1d0e7ec67a9926d9036fb4c0d8b1385420
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2017 Project Mahjong. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod, abstractstaticmethod
from datetime import datetime
import os
import numpy as np
import tensorflow as tf
# Mode definitions.
TRAIN = 100
PLAY = 200
EVAL = 300
DEBUG = 400
SELF_PLAY = 500
EPSILON_LOWER_BOUND = 0.0
EPSILON_UPPER_BOUND = 1.0
REPLAY_MEMORY_SIZE_DEFAULT = 10000
REPLAY_MEMORY_BATCH_SIZE_DEFAULT = 32
TRAIN_STEP_INTERVAL_DEFAULT = 4
TARGET_UPDATE_INTERVAL_DEFAULT = 100
SAVE_WEIGHTS_INTERVAL_DEFAULT = 100
SELF_PLAY_UPDATE_INTERVAL_DEFAULT = 1001
PRINT_SUMMARY_INTERVAL_DEFAULT = 100
GAMMA_DEFAULT = 0.99 # Reward discount factor.
class DQNInterface:
__metaclass__ = ABCMeta
def __init__(self, action_count, weights_file_path,
model_input_shape=None,
mode=TRAIN, load_previous_model=False,
replay_memory_size=REPLAY_MEMORY_SIZE_DEFAULT,
replay_memory_batch_size=REPLAY_MEMORY_BATCH_SIZE_DEFAULT,
train_step_interval=TRAIN_STEP_INTERVAL_DEFAULT,
target_update_interval=TARGET_UPDATE_INTERVAL_DEFAULT,
gamma=GAMMA_DEFAULT,
initial_epsilon=1.0, final_epsilon=0.01, epsilon_decay_steps=10000):
self._mode = mode
self._action_count = action_count
self._weights_file_path = weights_file_path
# Initialising Q functions.
# Online model.
self._model = self._create_model(model_input_shape, action_count)
# Target model.
self._target_model = self._create_model(model_input_shape, action_count)
self._target_model.set_weights(self._model.get_weights()) # Copy weights.
self._target_update_interval = target_update_interval
self._replay_memory = self._create_replay_memory(replay_memory_size)
self._replay_memory_batch_size = replay_memory_batch_size
self._train_step_interval = train_step_interval
# Setup epsilon.
self._final_epsilon = final_epsilon
self._epsilon_decay_value = (initial_epsilon - final_epsilon) / epsilon_decay_steps
self._epsilon = initial_epsilon
# Setup gamma.
self._gamma = gamma
# Milestone variables.
self._timestamp = 0
self._timestamp_in_episode = 0
self._episode = 0
# Episode-wised status variables.
self._max_q_history = []
self._total_reward = 0
self._losses = []
# Period-wised status variables.
self._period_max_q_histories = []
self._period_total_rewards = []
# Load.
if load_previous_model:
self.load_previous_run()
if self._mode == TRAIN:
self._writer = self.setup_tensorboard_writer()
# Print info.
print("Mode:", mode,
"| Replay memory size:", replay_memory_size,
"| Train step interval:", train_step_interval,
"| Target update interval:", target_update_interval)
@staticmethod
@abstractstaticmethod
def _create_replay_memory(max_size=None):
raise Exception("Do not call abstract method.")
@abstractmethod
def _train_on_memory(self, observation_batch,
action_batch,
reward_batch,
observation_next_batch,
done_batch,
weights=None,
batch_indexes=None):
raise Exception("Do not call abstract method.")
@abstractmethod
def _sample_replay_memory(self):
raise Exception("Do not call abstract method.")
@staticmethod
@abstractstaticmethod
def _create_model(input_shape=None, action_count=None):
raise Exception("Do not call abstract method.")
@staticmethod
@abstractstaticmethod
def _pre_process(input_data):
raise Exception("Do not call abstract method.")
def append_memory_and_train(self, observation, action, reward, observation_next, done):
assert self._mode == TRAIN
self._replay_memory.append((self._pre_process(observation),
action,
reward,
self._pre_process(observation_next),
done))
if len(self._replay_memory) > self._replay_memory_batch_size and \
self._timestamp % self._train_step_interval == 0:
# Sample the mini batch.
environment = self._sample_replay_memory()
if len(environment) == 5:
# Queue memory.
observation_batch, \
action_batch, \
reward_batch, \
observation_next_batch, \
done_batch = environment
weights = None
batch_indexes = None
elif len(environment) == 7:
# Prioritised memory.
observation_batch, \
action_batch, \
reward_batch, \
observation_next_batch, \
done_batch, \
weights, \
batch_indexes = environment
else:
raise Exception("Unexpected number of returns from _sample_replay_memory()!")
# Observations must be in the shape of (1, ...).
# This should be handled in _pre_process function.
self._train_on_memory(observation_batch,
action_batch,
reward_batch,
observation_next_batch,
done_batch,
weights,
batch_indexes)
if self._timestamp % self._target_update_interval == 0:
self._target_model.set_weights(self._model.get_weights())
def make_action(self, observation, mode=None):
if mode is None:
backup_mode = None
mode = self._mode
else:
backup_mode = self._mode
self._mode = mode
if mode == TRAIN:
choice = self._epsilon_greedy_choose(self._pre_process(observation))
if self._epsilon > self._final_epsilon:
self._epsilon -= self._epsilon_decay_value
else:
choice = self._max_q_choose(self._pre_process(observation))
if backup_mode:
self._mode = backup_mode
return choice
def notify_reward(self, reward):
self._total_reward += reward
def _epsilon_greedy_choose(self, input_data):
"""
This function should ideally be used only under TRAIN mode.
:param input_data: the pre-processed data to feed into the neural network.
:return: The index(action) selected following epsilon greedy strategy.
"""
self._timestamp += 1
q_values = self._model.predict(input_data)[0]
self._max_q_history.append(np.max(q_values))
if np.random.uniform(EPSILON_LOWER_BOUND, EPSILON_UPPER_BOUND) < self._epsilon:
return np.random.randint(0, self._action_count) # Range is [0, self._action_count).
else:
# Choose the maximum Q's index as a policy.
return np.argmax(q_values)
def _max_q_choose(self, input_data):
q_values = self._model.predict(input_data)[0]
choice = np.argmax(q_values)
if self._mode == DEBUG:
print("Q values:", q_values)
print("Choice:", choice)
print()
return choice
def load_previous_run(self):
if os.path.isfile(self._weights_file_path):
print(self._weights_file_path, "loaded.")
self._model.load_weights(self._weights_file_path)
self._target_model.set_weights(self._model.get_weights()) # Copy weights.
    @staticmethod
    def setup_tensorboard_writer(title=None):
        # Default computed at call time, not once at class-definition time.
        if title is None:
            title = str(datetime.now())
        return tf.summary.FileWriter("./logs/" + title)
def episode_finished(self, additional_logs):
self._episode += 1
if self._mode == TRAIN:
summary = tf.Summary()
if len(self._max_q_history) > 0:
average_max_q = sum(self._max_q_history) / len(self._max_q_history)
summary.value.add(tag="Average Max Q", simple_value=average_max_q)
if len(self._losses) > 0:
average_loss = sum(self._losses) / len(self._losses)
summary.value.add(tag="Average Loss", simple_value=average_loss)
summary.value.add(tag="Total Reward", simple_value=self._total_reward)
for tag in additional_logs:
summary.value.add(tag=tag, simple_value=additional_logs[tag])
# Append periodical data.
self._period_max_q_histories += self._max_q_history
self._period_total_rewards.append(self._total_reward)
# Periodical report.
if self._episode % PRINT_SUMMARY_INTERVAL_DEFAULT == 0:
if len(self._period_max_q_histories) > 0:
period_average_max_q = \
sum(self._period_max_q_histories) / len(self._period_max_q_histories)
else:
period_average_max_q = 0
period_average_total_reward = \
sum(self._period_total_rewards) / len(self._period_total_rewards)
tag_max_qs = "Average max Q over " + \
str(PRINT_SUMMARY_INTERVAL_DEFAULT) + " episodes"
tag_rewards = "Average Total reward over " + \
str(PRINT_SUMMARY_INTERVAL_DEFAULT) + " episodes"
print("Epsilon:", self._epsilon, "\t",
tag_max_qs + ":",
period_average_max_q, "\t",
tag_rewards + ":",
period_average_total_reward)
summary.value.add(tag=tag_max_qs, simple_value=period_average_max_q)
summary.value.add(
tag=tag_rewards,
simple_value=period_average_total_reward)
self._period_max_q_histories = []
self._period_total_rewards = []
# Reset status variables.
self._max_q_history = []
self._total_reward = 0
self._writer.add_summary(summary, self._episode)
self._writer.flush()
# Save weights.
if self._episode % SAVE_WEIGHTS_INTERVAL_DEFAULT == 0:
print("Finished", self._episode, "episodes.")
self._model.save_weights(self._weights_file_path)
elif self._mode == SELF_PLAY:
if self._episode % SELF_PLAY_UPDATE_INTERVAL_DEFAULT == 0:
print("Updated to the newest model.")
self._model.load_weights(self._weights_file_path)
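# Hypothetical concrete agent sketch (not part of the original project): shows
# the minimum a subclass has to provide. The tf.keras layers and sizes, the
# plain deque replay memory and the flattening pre-process below are
# illustrative assumptions, and tf.keras is assumed to be available in the
# installed TensorFlow build.
class ExampleDQN(DQNInterface):
    @staticmethod
    def _create_replay_memory(max_size=None):
        from collections import deque
        return deque(maxlen=max_size)

    def _sample_replay_memory(self):
        import random
        batch = random.sample(list(self._replay_memory),
                              self._replay_memory_batch_size)
        observations, actions, rewards, observations_next, dones = zip(*batch)
        return (np.concatenate(observations), np.array(actions),
                np.array(rewards), np.concatenate(observations_next),
                np.array(dones))

    def _train_on_memory(self, observation_batch, action_batch, reward_batch,
                         observation_next_batch, done_batch,
                         weights=None, batch_indexes=None):
        # Standard DQN target: r + gamma * max_a' Q_target(s', a') for
        # non-terminal transitions, r otherwise.
        targets = self._model.predict(observation_batch)
        next_q = self._target_model.predict(observation_next_batch)
        for i in range(len(action_batch)):
            targets[i][action_batch[i]] = reward_batch[i]
            if not done_batch[i]:
                targets[i][action_batch[i]] += self._gamma * np.max(next_q[i])
        history = self._model.fit(observation_batch, targets, verbose=0)
        self._losses.append(history.history["loss"][0])

    @staticmethod
    def _create_model(input_shape=None, action_count=None):
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(64, activation="relu",
                                  input_shape=input_shape),
            tf.keras.layers.Dense(action_count)])
        model.compile(optimizer="adam", loss="mse")
        return model

    @staticmethod
    def _pre_process(input_data):
        # The base class indexes predictions with [0], so observations are
        # reshaped to (1, ...).
        return np.asarray(input_data, dtype=np.float32).reshape(1, -1)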
| 38.822148
| 96
| 0.60714
|
99e6a237c4f3c776e314575141c8735fedf0b86c
| 3,184
|
py
|
Python
|
go/dlgo/agent/predict.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | null | null | null |
go/dlgo/agent/predict.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | null | null | null |
go/dlgo/agent/predict.py
|
huynq55/alpha-zero-general
|
7c7b8a9a09b79178157ec6b6d379a071c9f0994a
|
[
"MIT"
] | 1
|
2020-06-11T21:55:31.000Z
|
2020-06-11T21:55:31.000Z
|
# tag::dl_agent_imports[]
import numpy as np
from dlgo.agent.base import Agent
from dlgo.agent.helpers import is_point_an_eye
from dlgo import encoders
from dlgo import goboard
from dlgo import kerasutil
# end::dl_agent_imports[]
__all__ = [
'DeepLearningAgent',
'load_prediction_agent',
]
# tag::dl_agent_init[]
class DeepLearningAgent(Agent):
def __init__(self, model, encoder):
Agent.__init__(self)
self.model = model
self.encoder = encoder
# end::dl_agent_init[]
# tag::dl_agent_predict[]
def predict(self, game_state):
encoded_state = self.encoder.encode(game_state)
input_tensor = np.array([encoded_state])
return self.model.predict(input_tensor)[0]
def select_move(self, game_state):
num_moves = self.encoder.board_width * self.encoder.board_height
move_probs = self.predict(game_state)
# end::dl_agent_predict[]
# tag::dl_agent_probabilities[]
move_probs = move_probs ** 3 # <1>
eps = 1e-6
move_probs = np.clip(move_probs, eps, 1 - eps) # <2>
move_probs = move_probs / np.sum(move_probs) # <3>
        # <1> Increase the distance between the most likely and least likely moves.
# <2> Prevent move probs from getting stuck at 0 or 1
# <3> Re-normalize to get another probability distribution.
# end::dl_agent_probabilities[]
# tag::dl_agent_candidates[]
candidates = np.arange(num_moves) # <1>
ranked_moves = np.random.choice(
candidates, num_moves, replace=False, p=move_probs) # <2>
for point_idx in ranked_moves:
point = self.encoder.decode_point_index(point_idx)
if game_state.is_valid_move(goboard.Move.play(point)) and \
not is_point_an_eye(game_state.board, point, game_state.next_player): # <3>
return goboard.Move.play(point)
return goboard.Move.pass_turn() # <4>
# <1> Turn the probabilities into a ranked list of moves.
# <2> Sample potential candidates
# <3> Starting from the top, find a valid move that doesn't reduce eye-space.
# <4> If no legal and non-self-destructive moves are left, pass.
# end::dl_agent_candidates[]
# tag::dl_agent_serialize[]
def serialize(self, h5file):
h5file.create_group('encoder')
h5file['encoder'].attrs['name'] = self.encoder.name()
h5file['encoder'].attrs['board_width'] = self.encoder.board_width
h5file['encoder'].attrs['board_height'] = self.encoder.board_height
h5file.create_group('model')
kerasutil.save_model_to_hdf5_group(self.model, h5file['model'])
# end::dl_agent_serialize[]
# tag::dl_agent_deserialize[]
def load_prediction_agent(h5file):
model = kerasutil.load_model_from_hdf5_group(h5file['model'])
encoder_name = h5file['encoder'].attrs['name']
if not isinstance(encoder_name, str):
encoder_name = encoder_name.decode('ascii')
board_width = h5file['encoder'].attrs['board_width']
board_height = h5file['encoder'].attrs['board_height']
encoder = encoders.get_encoder_by_name(
encoder_name, (board_width, board_height))
return DeepLearningAgent(model, encoder)
# end::dl_agent_deserialize[]
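# Hypothetical usage sketch (not from the book's listings): restore a
# serialized agent and ask it for one move. The HDF5 path and board size are
# assumptions; game-state construction follows dlgo's GameState.new_game().
def load_agent_and_select_move(h5_path, board_size=19):
    import h5py
    with h5py.File(h5_path, 'r') as h5file:
        agent = load_prediction_agent(h5file)
    game_state = goboard.GameState.new_game(board_size)
    return agent.select_move(game_state)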
| 37.904762
| 96
| 0.692525
|
f5785bbcb1ebba33d5b9163e7a4ee79d62bf4ba1
| 676
|
py
|
Python
|
neosis_telephone_directory/users/tests/test_urls.py
|
borkarfaiz/neosis_telephone_directory
|
d4a0f7197ac15f4993488e21459a744c370fde0f
|
[
"MIT"
] | null | null | null |
neosis_telephone_directory/users/tests/test_urls.py
|
borkarfaiz/neosis_telephone_directory
|
d4a0f7197ac15f4993488e21459a744c370fde0f
|
[
"MIT"
] | null | null | null |
neosis_telephone_directory/users/tests/test_urls.py
|
borkarfaiz/neosis_telephone_directory
|
d4a0f7197ac15f4993488e21459a744c370fde0f
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
from neosis_telephone_directory.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| 27.04
| 74
| 0.684911
|
e011e90342089565c46ded215f5ce068dc27b0c7
| 3,169
|
py
|
Python
|
WebDriverWait_ex.py
|
mengqhui/mypython
|
2829bf40684489f220cde0710a585677293f04ae
|
[
"Apache-2.0"
] | null | null | null |
WebDriverWait_ex.py
|
mengqhui/mypython
|
2829bf40684489f220cde0710a585677293f04ae
|
[
"Apache-2.0"
] | null | null | null |
WebDriverWait_ex.py
|
mengqhui/mypython
|
2829bf40684489f220cde0710a585677293f04ae
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Examples of explicit waits (WebDriverWait) and condition checks (expected_conditions) in Python Selenium 2
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
base_url = "http://www.baidu.com"
driver = webdriver.Firefox()
driver.implicitly_wait(5)
'''When both an implicit and an explicit wait are set, the effective timeout is the larger of the two'''
locator = (By.ID, 'kw')
driver.get(base_url)
WebDriverWait(driver, 10).until(EC.title_is(u"百度一下,你就知道"))
'''Checks the page title; returns a boolean'''
WebDriverWait(driver, 10).until(EC.title_contains(u"百度一下"))
'''Checks whether the title contains the given text; returns a boolean'''
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'kw')))
'''Checks whether an element has been added to the DOM tree; this does not mean it is visible. Returns the WebElement once located'''
WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.ID, 'su')))
'''Checks whether an element is present in the DOM and visible; visible means it is displayed and both its width and height are greater than 0'''
WebDriverWait(driver, 10).until(EC.visibility_of(
    driver.find_element(by=By.ID, value='kw')))
'''Checks whether an element is visible; returns the element if it is'''
WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.mnav')))
'''Checks whether at least one element is present in the DOM tree; returns a list once located'''
WebDriverWait(driver, 10).until(
    EC.visibility_of_any_elements_located((By.CSS_SELECTOR, '.mnav')))
'''Checks whether at least one element is visible on the page; returns a list once located'''
WebDriverWait(driver, 10).until(EC.text_to_be_present_in_element(
    (By.XPATH, "//*[@id='u1']/a[8]"), u'设置'))
'''Checks whether the given element contains the expected string; returns a boolean'''
WebDriverWait(driver, 10).until(
    EC.text_to_be_present_in_element_value((By.CSS_SELECTOR, '#su'), u'百度一下'))
'''Checks whether the given element's value attribute contains the expected string; returns a boolean'''
# WebDriverWait(driver,10).until(EC.frame_to_be_available_and_switch_to_it(locator))
'''Checks whether the frame can be switched into; if so, returns True and switches into it, otherwise returns False'''
# Note: there is no frame on this page to switch into
WebDriverWait(driver, 10).until(EC.invisibility_of_element_located(
    (By.CSS_SELECTOR, '#swfEveryCookieWrap')))
'''Checks whether an element is absent from the DOM or invisible; returns False if it is visible, otherwise returns the element'''
# Note: #swfEveryCookieWrap is a hidden element on this page
WebDriverWait(driver, 10).until(EC.element_to_be_clickable(
    (By.XPATH, "//*[@id='u1']/a[8]"))).click()
'''Checks whether an element is visible and enabled, i.e. clickable'''
driver.find_element_by_xpath("//*[@id='wrapper']/div[6]/a[1]").click()
# WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,"//*[@id='wrapper']/div[6]/a[1]"))).click()
# WebDriverWait(driver,10).until(EC.staleness_of(driver.find_element(By.ID,'su')))
'''Waits for an element to be removed from the DOM tree'''
# No suitable example was found for this one
WebDriverWait(driver, 10).until(EC.element_to_be_selected(
    driver.find_element(By.XPATH, "//*[@id='nr']/option[1]")))
'''Checks whether an element is selected; typically used with drop-down lists'''
WebDriverWait(driver, 10).until(EC.element_selection_state_to_be(
    driver.find_element(By.XPATH, "//*[@id='nr']/option[1]"), True))
'''Checks whether an element's selection state matches the expected state'''
WebDriverWait(driver, 10).until(EC.element_located_selection_state_to_be(
    (By.XPATH, "//*[@id='nr']/option[1]"), True))
'''Checks whether a located element's selection state matches the expected state'''
driver.find_element_by_xpath(".//*[@id='gxszButton']/a[1]").click()
instance = WebDriverWait(driver, 10).until(EC.alert_is_present())
'''Checks whether an alert is present on the page; if so, switches to it and returns the alert'''
print(instance.text)
instance.accept()
driver.close()
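# Hypothetical extension (not executed above): WebDriverWait.until() accepts
# any callable that takes the driver, so custom conditions can be written as
# plain functions. The threshold used here is an arbitrary illustration.
def title_length_at_least(n):
    def _condition(drv):
        return len(drv.title) >= n
    return _condition
# Usage sketch: WebDriverWait(driver, 10).until(title_length_at_least(4))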
| 36.848837
| 114
| 0.731145
|
9e06288751332f08fc647feaeaca73f090bc38b8
| 132
|
py
|
Python
|
newrelic_plugin_agent/constants.py
|
ambitioninc/django-newrelic-plugin-agent
|
8ae3029ffd86d6e3c81a59e54b65a3e1f86ba654
|
[
"MIT"
] | null | null | null |
newrelic_plugin_agent/constants.py
|
ambitioninc/django-newrelic-plugin-agent
|
8ae3029ffd86d6e3c81a59e54b65a3e1f86ba654
|
[
"MIT"
] | 4
|
2016-04-15T19:44:03.000Z
|
2016-11-29T15:01:02.000Z
|
newrelic_plugin_agent/constants.py
|
ambitioninc/django-newrelic-plugin-agent
|
8ae3029ffd86d6e3c81a59e54b65a3e1f86ba654
|
[
"MIT"
] | 2
|
2016-04-07T19:20:11.000Z
|
2016-11-29T14:42:22.000Z
|
# Namespace of agent mutex ID
# Always append the metric's guid when obtaining a lock
AGENT_MUTEX_ID_NAMESPACE = 'NewrelicPluginAgent'
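# Illustrative only: a lock obtained for a given metric would be identified by
# the namespace plus that metric's guid, e.g. "NewrelicPluginAgent:<metric guid>";
# the exact separator/format is an assumption, not defined in this module.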
| 33
| 52
| 0.818182
|
11834f5f189ca9e4b93a600c4dfe0d61b5de4be1
| 12,276
|
py
|
Python
|
venv/lib/python3.6/site-packages/weasyprint/layout/columns.py
|
jyoost/saleordjangooriginal
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | 2
|
2019-12-06T15:40:14.000Z
|
2020-07-29T21:30:35.000Z
|
venv/lib/python3.6/site-packages/weasyprint/layout/columns.py
|
jyoost/saleor
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
venv/lib/python3.6/site-packages/weasyprint/layout/columns.py
|
jyoost/saleor
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | null | null | null |
"""
weasyprint.layout.columns
-------------------------
Layout for columns.
:copyright: Copyright 2011-2019 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from math import floor
from .absolute import absolute_layout
from .percentages import resolve_percentages
def columns_layout(context, box, max_position_y, skip_stack, containing_block,
page_is_empty, absolute_boxes, fixed_boxes,
adjoining_margins):
"""Lay out a multi-column ``box``."""
# Avoid circular imports
from .blocks import (
block_box_layout, block_level_layout, block_level_width,
collapse_margin)
# Implementation of the multi-column pseudo-algorithm:
# https://www.w3.org/TR/css3-multicol/#pseudo-algorithm
width = None
style = box.style
original_max_position_y = max_position_y
if box.style['position'] == 'relative':
# New containing block, use a new absolute list
absolute_boxes = []
box = box.copy_with_children(box.children)
box.position_y += collapse_margin(adjoining_margins) - box.margin_top
height = box.style['height']
if height != 'auto' and height.unit != '%':
assert height.unit == 'px'
known_height = True
max_position_y = min(
max_position_y, box.content_box_y() + height.value)
else:
known_height = False
# TODO: the available width can be unknown if the containing block needs
# the size of this block to know its own size.
block_level_width(box, containing_block)
available_width = box.width
if style['column_width'] == 'auto' and style['column_count'] != 'auto':
count = style['column_count']
width = max(
0, available_width - (count - 1) * style['column_gap']) / count
elif (style['column_width'] != 'auto' and
style['column_count'] == 'auto'):
count = max(1, int(floor(
(available_width + style['column_gap']) /
(style['column_width'] + style['column_gap']))))
width = (
(available_width + style['column_gap']) / count -
style['column_gap'])
else:
count = min(style['column_count'], int(floor(
(available_width + style['column_gap']) /
(style['column_width'] + style['column_gap']))))
width = (
(available_width + style['column_gap']) / count -
style['column_gap'])
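    # Worked example for the 'column_width' branch above: with
    # available_width=600 and column_gap=10, column_width=190 gives
    # count = floor((600 + 10) / (190 + 10)) = 3 and
    # width = (600 + 10) / 3 - 10, i.e. about 193.33px per column.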
def create_column_box(children):
column_box = box.anonymous_from(box, children=children)
resolve_percentages(column_box, containing_block)
column_box.is_column = True
column_box.width = width
column_box.position_x = box.content_box_x()
column_box.position_y = box.content_box_y()
return column_box
# Handle column-span property.
# We want to get the following structure:
# columns_and_blocks = [
# [column_child_1, column_child_2],
# spanning_block,
# …
# ]
columns_and_blocks = []
column_children = []
for child in box.children:
if child.style['column_span'] == 'all':
if column_children:
columns_and_blocks.append(column_children)
columns_and_blocks.append(child.copy())
column_children = []
continue
column_children.append(child.copy())
if column_children:
columns_and_blocks.append(column_children)
if not box.children:
next_page = {'break': 'any', 'page': None}
skip_stack = None
# Balance.
#
# The current algorithm starts from the ideal height (the total height
# divided by the number of columns). We then iterate until the last column
# is not the highest one. At the end of each loop, we add the minimal
# height needed to make one direct child at the top of one column go to the
# end of the previous column.
#
# We rely on a real rendering for each loop, and with a stupid algorithm
# like this it can last minutes…
adjoining_margins = []
current_position_y = box.content_box_y()
new_children = []
for column_children_or_block in columns_and_blocks:
if not isinstance(column_children_or_block, list):
# We get a spanning block, we display it like other blocks.
block = column_children_or_block
resolve_percentages(block, containing_block)
block.position_x = box.content_box_x()
block.position_y = current_position_y
new_child, _, _, adjoining_margins, _ = block_level_layout(
context, block, original_max_position_y, skip_stack,
containing_block, page_is_empty, absolute_boxes, fixed_boxes,
adjoining_margins)
new_children.append(new_child)
current_position_y = (
new_child.border_height() + new_child.border_box_y())
adjoining_margins.append(new_child.margin_bottom)
continue
excluded_shapes = context.excluded_shapes[:]
# We have a list of children that we have to balance between columns.
column_children = column_children_or_block
# Find the total height of the content
current_position_y += collapse_margin(adjoining_margins)
adjoining_margins = []
column_box = create_column_box(column_children)
new_child, _, _, _, _ = block_box_layout(
context, column_box, float('inf'), skip_stack, containing_block,
page_is_empty, [], [], [])
height = new_child.margin_height()
if style['column_fill'] == 'balance':
height /= count
# Try to render columns until the content fits, increase the column
# height step by step.
column_skip_stack = skip_stack
lost_space = float('inf')
while True:
# Remove extra excluded shapes introduced during previous loop
new_excluded_shapes = (
len(context.excluded_shapes) - len(excluded_shapes))
for i in range(new_excluded_shapes):
context.excluded_shapes.pop()
for i in range(count):
# Render the column
new_box, resume_at, next_page, _, _ = block_box_layout(
context, column_box, box.content_box_y() + height,
column_skip_stack, containing_block, page_is_empty,
[], [], [])
if new_box is None:
# We didn't render anything. Give up and use the max
# content height.
height *= count
continue
column_skip_stack = resume_at
in_flow_children = [
child for child in new_box.children
if child.is_in_normal_flow()]
if in_flow_children:
# Get the empty space at the bottom of the column box
empty_space = height - (
in_flow_children[-1].position_y - box.content_box_y() +
in_flow_children[-1].margin_height())
# Get the minimum size needed to render the next box
next_box, _, _, _, _ = block_box_layout(
context, column_box, box.content_box_y(),
column_skip_stack, containing_block, True, [], [], [])
for child in next_box.children:
if child.is_in_normal_flow():
next_box_size = child.margin_height()
break
else:
empty_space = next_box_size = 0
# Append the size needed to render the next box in this
# column.
#
# The next box size may be smaller than the empty space, for
# example when the next box can't be separated from its own
# next box. In this case we don't try to find the real value
# and let the workaround below fix this for us.
#
# We also want to avoid very small values that may have been
# introduced by rounding errors. As the workaround below at
# least adds 1 pixel for each loop, we can ignore lost spaces
# lower than 1px.
if next_box_size - empty_space > 1:
lost_space = min(lost_space, next_box_size - empty_space)
# Stop if we already rendered the whole content
if resume_at is None:
break
if column_skip_stack is None:
# We rendered the whole content, stop
break
else:
if lost_space == float('inf'):
# We didn't find the extra size needed to render a child in
# the previous column, increase height by the minimal
# value.
height += 1
else:
# Increase the columns heights and render them once again
height += lost_space
column_skip_stack = skip_stack
# TODO: check box.style['max']-height
max_position_y = min(max_position_y, box.content_box_y() + height)
# Replace the current box children with columns
i = 0
max_column_height = 0
columns = []
while True:
if i == count - 1:
max_position_y = original_max_position_y
column_box = create_column_box(column_children)
column_box.position_y = current_position_y
if style['direction'] == 'rtl':
column_box.position_x += (
box.width - (i + 1) * width - i * style['column_gap'])
else:
column_box.position_x += i * (width + style['column_gap'])
new_child, column_skip_stack, column_next_page, _, _ = (
block_box_layout(
context, column_box, max_position_y, skip_stack,
containing_block, page_is_empty, absolute_boxes,
fixed_boxes, None))
if new_child is None:
break
next_page = column_next_page
skip_stack = column_skip_stack
columns.append(new_child)
max_column_height = max(
max_column_height, new_child.margin_height())
if skip_stack is None:
break
i += 1
if i == count and not known_height:
# [If] a declaration that constrains the column height
# (e.g., using height or max-height). In this case,
# additional column boxes are created in the inline
# direction.
break
current_position_y += max_column_height
for column in columns:
column.height = max_column_height
new_children.append(column)
if box.children and not new_children:
# The box has children but none can be drawn, let's skip the whole box
return None, (0, None), {'break': 'any', 'page': None}, [], False
# Set the height of box and the columns
box.children = new_children
current_position_y += collapse_margin(adjoining_margins)
if box.height == 'auto':
box.height = current_position_y - box.position_y
height_difference = 0
else:
height_difference = box.height - (current_position_y - box.position_y)
if box.min_height != 'auto' and box.min_height > box.height:
height_difference += box.min_height - box.height
box.height = box.min_height
for child in new_children[::-1]:
if child.is_column:
child.height += height_difference
else:
break
if box.style['position'] == 'relative':
# New containing block, resolve the layout of the absolute descendants
for absolute_box in absolute_boxes:
absolute_layout(context, absolute_box, box, fixed_boxes)
return box, skip_stack, next_page, [], False
| 40.649007
| 79
| 0.586347
|
75e228d72a643744244d19709e2c714ed00d868a
| 25,096
|
py
|
Python
|
cumulusci/tasks/bulkdata.py
|
1handclapping/CumulusCI
|
cb7b061d049c5f05503a4ef23ac198342496a949
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/bulkdata.py
|
1handclapping/CumulusCI
|
cb7b061d049c5f05503a4ef23ac198342496a949
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/bulkdata.py
|
1handclapping/CumulusCI
|
cb7b061d049c5f05503a4ef23ac198342496a949
|
[
"BSD-3-Clause"
] | null | null | null |
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from contextlib import contextmanager
import csv
import datetime
import io
import itertools
import time
import tempfile
import xml.etree.ElementTree as ET
from salesforce_bulk.util import IteratorBytesIO
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Unicode
from sqlalchemy import text
from sqlalchemy import types
from sqlalchemy import event
import requests
import unicodecsv
from cumulusci.core.utils import process_bool_arg, ordered_yaml_load
from cumulusci.core.exceptions import BulkDataException
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import log_progress
# TODO: UserID Catcher
# TODO: Dater
# Create a custom sqlalchemy field type for sqlite datetime fields which are stored as integer of epoch time
class EpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
def process_bind_param(self, value, dialect):
return int((value - self.epoch).total_seconds()) * 1000
def process_result_value(self, value, dialect):
return self.epoch + datetime.timedelta(seconds=value / 1000)
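# Example: datetime(2018, 1, 1) is stored as 1514764800000 (milliseconds since
# the Unix epoch) and read back as datetime(2018, 1, 1, 0, 0).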
# Listen for sqlalchemy column_reflect event and map datetime fields to EpochType
@event.listens_for(Table, "column_reflect")
def setup_epoch(inspector, table, column_info):
if isinstance(column_info["type"], types.DateTime):
column_info["type"] = EpochType()
class BulkJobTaskMixin(object):
def _job_state_from_batches(self, job_id):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job_id)
response = requests.get(uri, headers=self.bulk.headers())
return self._parse_job_state(response.content)
def _parse_job_state(self, xml):
tree = ET.fromstring(xml)
completed = 0
pending = 0
failed = 0
for el in tree.iterfind(".//{%s}state" % self.bulk.jobNS):
state = el.text
if state == "Not Processed":
return "Aborted"
elif state == "Failed":
failed += 1
elif state == "Completed":
completed += 1
else: # Queued, InProgress
pending += 1
if pending:
return "InProgress"
elif failed:
return "Failed"
else:
return "Completed"
def _wait_for_job(self, job_id):
while True:
job_status = self.bulk.job_status(job_id)
self.logger.info(
" Waiting for job {} ({}/{})".format(
job_id,
job_status["numberBatchesCompleted"],
job_status["numberBatchesTotal"],
)
)
result = self._job_state_from_batches(job_id)
if result != "InProgress":
break
time.sleep(10)
self.logger.info("Job {} finished with result: {}".format(job_id, result))
return result
def _sql_bulk_insert_from_csv(self, conn, table, columns, data_file):
        if conn.dialect.driver == "psycopg2":
            # psycopg2 (the postgres driver) supports COPY FROM
# to efficiently bulk insert rows in CSV format
with conn.connection.cursor() as cursor:
cursor.copy_expert(
"COPY {} ({}) FROM STDIN WITH (FORMAT CSV)".format(
table, ",".join(columns)
),
data_file,
)
else:
# For other db drivers we need to use standard SQL
# -- this is optimized for ease of implementation
# rather than performance and may need more work.
reader = unicodecsv.DictReader(data_file, columns)
table = self.metadata.tables[table]
rows = list(reader)
if rows:
conn.execute(table.insert().values(rows))
self.session.flush()
class DeleteData(BaseSalesforceApiTask, BulkJobTaskMixin):
task_options = {
"objects": {
"description": "A list of objects to delete records from in order of deletion. If passed via command line, use a comma separated string",
"required": True,
},
"hardDelete": {
"description": "If True, perform a hard delete, bypassing the recycle bin. Default: False"
},
}
def _init_options(self, kwargs):
super(DeleteData, self)._init_options(kwargs)
# Split and trim objects string into a list if not already a list
if not isinstance(self.options["objects"], list):
self.options["objects"] = [
obj.strip() for obj in self.options["objects"].split(",")
]
self.options["hardDelete"] = process_bool_arg(self.options.get("hardDelete"))
def _run_task(self):
for obj in self.options["objects"]:
self.logger.info("Deleting all {} records".format(obj))
delete_job = self._create_job(obj)
if delete_job is not None:
self._wait_for_job(delete_job)
def _create_job(self, obj):
# Query for rows to delete
delete_rows = self._query_salesforce_for_records_to_delete(obj)
if not delete_rows:
self.logger.info(" No {} objects found, skipping delete".format(obj))
return
# Upload all the batches
operation = "hardDelete" if self.options["hardDelete"] else "delete"
delete_job = self.bulk.create_job(obj, operation)
self.logger.info(" Deleting {} {} records".format(len(delete_rows), obj))
batch_num = 1
for batch in self._upload_batches(delete_job, delete_rows):
self.logger.info(" Uploaded batch {}".format(batch))
batch_num += 1
self.bulk.close_job(delete_job)
return delete_job
def _query_salesforce_for_records_to_delete(self, obj):
# Query for all record ids
self.logger.info(" Querying for all {} objects".format(obj))
query_job = self.bulk.create_query_job(obj, contentType="CSV")
batch = self.bulk.query(query_job, "select Id from {}".format(obj))
while not self.bulk.is_batch_done(batch, query_job):
time.sleep(10)
self.bulk.close_job(query_job)
delete_rows = []
for result in self.bulk.get_all_results_for_query_batch(batch, query_job):
reader = unicodecsv.DictReader(result, encoding="utf-8")
for row in reader:
delete_rows.append(row)
return delete_rows
def _split_batches(self, data, batch_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(data), batch_size):
yield data[i : i + batch_size]
def _upload_batches(self, job, data):
uri = "{}/job/{}/batch".format(self.bulk.endpoint, job)
headers = self.bulk.headers({"Content-Type": "text/csv"})
for batch in self._split_batches(data, 10000):
rows = ['"Id"']
rows += ['"{}"'.format(record["Id"]) for record in batch]
resp = requests.post(uri, data="\n".join(rows), headers=headers)
content = resp.content
if resp.status_code >= 400:
self.bulk.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.bulk.jobNS)
yield batch_id
class LoadData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "The database url to a database containing the test data to load",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"start_step": {
"description": "If specified, skip steps before this one in the mapping",
"required": False,
},
}
def _run_task(self):
self._init_mapping()
self._init_db()
start_step = self.options.get("start_step")
started = False
for name, mapping in self.mapping.items():
# Skip steps until start_step
if not started and start_step and name != start_step:
self.logger.info("Skipping step: {}".format(name))
continue
started = True
self.logger.info("Running Job: {}".format(name))
result = self._load_mapping(mapping)
if result != "Completed":
break
def _load_mapping(self, mapping):
"""Load data for a single step."""
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
# We store inserted ids even if some batches failed
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
def _create_job(self, mapping):
"""Initiate a bulk insert and upload batches to run in parallel."""
job_id = self.bulk.create_insert_job(mapping["sf_object"], contentType="CSV")
self.logger.info(" Created bulk job {}".format(job_id))
# Upload batches
local_ids_for_batch = {}
for batch_file, local_ids in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(" Uploaded batch {}".format(batch_id))
self.bulk.close_job(job_id)
return job_id, local_ids_for_batch
def _get_batches(self, mapping, batch_size=10000):
"""Get data from the local db"""
action = mapping.get("action", "insert")
fields = mapping.get("fields", {}).copy()
static = mapping.get("static", {})
lookups = mapping.get("lookups", {})
record_type = mapping.get("record_type")
# Skip Id field on insert
if action == "insert" and "Id" in fields:
del fields["Id"]
# Build the list of fields to import
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append("RecordTypeId")
# default to the profile assigned recordtype if we can't find any
# query for the RT by developer name
query = (
"SELECT Id FROM RecordType WHERE SObjectType='{0}'"
"AND DeveloperName = '{1}' LIMIT 1"
)
record_type_id = self.sf.query(
query.format(mapping.get("sf_object"), record_type)
)["records"][0]["Id"]
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return batch_file, writer, batch_ids
batch_file, writer, batch_ids = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + list(static.values())
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
# Yield and start a new file every [batch_size] rows
if not total_rows % batch_size:
batch_file.seek(0)
self.logger.info(" Processing batch {}".format(batch_num))
yield batch_file, batch_ids
batch_file, writer, batch_ids = start_batch()
batch_num += 1
# Yield result file for final batch
if batch_ids:
batch_file.seek(0)
yield batch_file, batch_ids
self.logger.info(
" Prepared {} rows for import to {}".format(
total_rows, mapping["sf_object"]
)
)
def _query_db(self, mapping):
"""Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
"""
model = self.models[mapping.get("table")]
# Use primary key instead of the field mapped to SF Id
fields = mapping["fields"].copy()
del fields["Id"]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get("lookups", {}).copy().values()
for lookup in lookups:
lookup["aliased_table"] = aliased(
self.metadata.tables["{}_sf_ids".format(lookup["table"])]
)
columns.append(lookup["aliased_table"].columns.sf_id)
query = self.session.query(*columns)
if "record_type" in mapping and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping["record_type"])
if "filters" in mapping:
filter_args = []
for f in mapping["filters"]:
filter_args.append(text(f))
query = query.filter(*filter_args)
for lookup in lookups:
# Outer join with lookup ids table:
# returns main obj even if lookup is null
value_column = getattr(model, lookup["key_field"])
query = query.outerjoin(
lookup["aliased_table"],
lookup["aliased_table"].columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, lookup["key_field"])
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
def _convert(self, value):
if value:
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
"""Get the job results and store inserted SF Ids in a new table"""
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for batch_id, local_ids in local_ids_for_batch.items():
try:
results_url = "{}/job/{}/batch/{}/result".format(
self.bulk.endpoint, job_id, batch_id
)
# Download entire result file to a temporary file first
# to avoid the server dropping connections
with _download_file(results_url, self.bulk) as f:
self.logger.info(
" Downloaded results for batch {}".format(batch_id)
)
self._store_inserted_ids_for_batch(
f, local_ids, id_table_name, conn
)
self.logger.info(
" Updated {} for batch {}".format(id_table_name, batch_id)
)
except Exception: # pragma: nocover
# If we can't download one result file,
# don't let that stop us from downloading the others
self.logger.error(
"Could not download batch results: {}".format(batch_id)
)
continue
self.session.commit()
def _reset_id_table(self, mapping):
"""Create an empty table to hold the inserted SF Ids"""
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
id_table_name = "{}_sf_ids".format(mapping["table"])
if id_table_name not in self._initialized_id_tables:
if id_table_name in self.metadata.tables:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
def _store_inserted_ids_for_batch(
self, result_file, local_ids, id_table_name, conn
):
# Set up a function to generate rows based on this result file
def produce_csv():
"""Iterate over job results and prepare rows for id table"""
reader = unicodecsv.reader(result_file)
next(reader) # skip header
i = 0
for row, local_id in zip(reader, local_ids):
if row[1] == "true": # Success
sf_id = row[0]
yield "{},{}\n".format(local_id, sf_id).encode("utf-8")
else:
self.logger.warning(" Error on row {}: {}".format(i, row[3]))
i += 1
# Bulk insert rows into id table
columns = ("id", "sf_id")
data_file = IteratorBytesIO(produce_csv())
self._sql_bulk_insert_from_csv(conn, id_table_name, columns, data_file)
def _init_db(self):
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# Loop through mappings and reflect each referenced table
self.models = {}
for name, mapping in self.mapping.items():
if "table" in mapping and mapping["table"] not in self.models:
self.models[mapping["table"]] = self.base.classes[mapping["table"]]
# initialize the DB session
self.session = Session(self.engine)
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mapping = ordered_yaml_load(f)
class QueryData(BulkJobTaskMixin, BaseSalesforceApiTask):
task_options = {
"database_url": {
"description": "A DATABASE_URL where the query output should be written",
"required": True,
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
}
def _run_task(self):
self._init_mapping()
self._init_db()
for mapping in self.mappings.values():
soql = self._soql_for_mapping(mapping)
self._run_query(soql, mapping)
def _init_db(self):
self.models = {}
# initialize the DB engine
self.engine = create_engine(self.options["database_url"])
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = self.engine
# Create the tables
self._create_tables()
# initialize the automap mapping
self.base = automap_base(bind=self.engine, metadata=self.metadata)
self.base.prepare(self.engine, reflect=True)
# initialize session
self.session = create_session(bind=self.engine, autocommit=False)
def _init_mapping(self):
with open(self.options["mapping"], "r") as f:
self.mappings = ordered_yaml_load(f)
def _soql_for_mapping(self, mapping):
sf_object = mapping["sf_object"]
fields = [field["sf"] for field in self._fields_for_mapping(mapping)]
soql = "SELECT {fields} FROM {sf_object}".format(
**{"fields": ", ".join(fields), "sf_object": sf_object}
)
if "record_type" in mapping:
soql += " WHERE RecordType.DeveloperName = '{}'".format(
mapping["record_type"]
)
return soql
def _run_query(self, soql, mapping):
self.logger.info("Creating bulk job for: {sf_object}".format(**mapping))
job = self.bulk.create_query_job(mapping["sf_object"], contentType="CSV")
self.logger.info("Job id: {0}".format(job))
self.logger.info("Submitting query: {}".format(soql))
batch = self.bulk.query(job, soql)
self.logger.info("Batch id: {0}".format(batch))
self.bulk.wait_for_batch(job, batch)
self.logger.info("Batch {0} finished".format(batch))
self.bulk.close_job(job)
self.logger.info("Job {0} closed".format(job))
conn = self.session.connection()
for result_file in self._get_results(batch, job):
self._import_results(mapping, result_file, conn)
def _get_results(self, batch_id, job_id):
result_ids = self.bulk.get_query_batch_result_ids(batch_id, job_id=job_id)
for result_id in result_ids:
self.logger.info("Result id: {}".format(result_id))
uri = "{}/job/{}/batch/{}/result/{}".format(
self.bulk.endpoint, job_id, batch_id, result_id
)
with _download_file(uri, self.bulk) as f:
self.logger.info("Result {} downloaded".format(result_id))
yield f
def _import_results(self, mapping, result_file, conn):
# Map SF field names to local db column names
sf_header = [
name.strip('"')
for name in result_file.readline().strip().decode("utf-8").split(",")
]
columns = []
for sf in sf_header:
if sf == "Records not found for this query":
return
if sf:
column = mapping["fields"].get(sf)
if not column:
column = mapping.get("lookups", {}).get(sf, {}).get("key_field")
if column:
columns.append(column)
if not columns:
return
record_type = mapping.get("record_type")
if record_type:
columns.append("record_type")
processor = log_progress(
process_incoming_rows(result_file, record_type), self.logger
)
data_file = IteratorBytesIO(processor)
self._sql_bulk_insert_from_csv(conn, mapping["table"], columns, data_file)
self.session.commit()
def _create_tables(self):
for mapping in self.mappings.values():
self._create_table(mapping)
self.metadata.create_all()
def _create_table(self, mapping):
model_name = "{}Model".format(mapping["table"])
mapper_kwargs = {}
table_kwargs = {}
if mapping["table"] in self.models:
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
self.models[mapping["table"]] = type(model_name, (object,), {})
id_column = mapping["fields"].get("Id") or "id"
fields = []
fields.append(Column(id_column, Unicode(255), primary_key=True))
for field in self._fields_for_mapping(mapping):
if field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], self.metadata, *fields, **table_kwargs)
mapper(self.models[mapping["table"]], t, **mapper_kwargs)
def _fields_for_mapping(self, mapping):
fields = []
for sf_field, db_field in mapping.get("fields", {}).items():
fields.append({"sf": sf_field, "db": db_field})
for sf_field, lookup in mapping.get("lookups", {}).items():
fields.append({"sf": sf_field, "db": lookup["key_field"]})
return fields
@contextmanager
def _download_file(uri, bulk_api):
"""Download the bulk API result file for a single batch"""
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile("w+b") as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
yield f
def process_incoming_rows(f, record_type=None):
if record_type and not isinstance(record_type, bytes):
record_type = record_type.encode("utf-8")
for line in f:
if record_type:
yield line + b"," + record_type + b"\n"
else:
yield line
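# Hypothetical mapping file sketch (YAML) illustrating the keys read by
# LoadData/QueryData above; the step, object, table and column names are
# assumptions, not taken from any real project:
#
#   Insert Contacts:
#       sf_object: Contact
#       table: contacts
#       fields:
#           Id: sf_id
#           LastName: last_name
#       lookups:
#           AccountId:
#               table: accounts
#               key_field: account_id
#       record_type: Business
#
# "action", "static" and "filters" are further optional keys handled in
# _get_batches() and _query_db().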
| 37.909366
| 150
| 0.593521
|
ef4efb9145edf1bb71b3e6ae40a2ce3c97e9e35d
| 4,404
|
py
|
Python
|
profiles_api/views.py
|
solrakmnk/profiles-rest-api
|
7496ba93aaf4abeb8a64f2b4b8e356b9c872a61a
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
solrakmnk/profiles-rest-api
|
7496ba93aaf4abeb8a64f2b4b8e356b9c872a61a
|
[
"MIT"
] | 7
|
2020-01-08T15:10:40.000Z
|
2022-02-10T11:59:38.000Z
|
profiles_api/views.py
|
solrakmnk/profiles-rest-api
|
7496ba93aaf4abeb8a64f2b4b8e356b9c872a61a
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from . import models
from . import permissions
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP methods as functions (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your logic',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
"""Create Hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle partial update of an object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
a_viewset = [
'Uses actions (list,create,retrieve,update,partial_update)',
'Automatically maps to URLS using providers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello message."""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
class UsersProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'password')
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
queryset = models.ProfileFeedItem.objects.all()
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
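# Hypothetical wiring sketch (normally placed in the app's urls.py, not here);
# the route prefixes are assumptions, only the view classes come from this
# module, and older DRF versions spell the keyword `base_name`:
#
#   from django.urls import path, include
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register('hello-viewset', HelloViewSet, basename='hello-viewset')
#   router.register('profile', UsersProfileViewSet)
#   router.register('feed', UserProfileFeedViewSet)
#
#   urlpatterns = [
#       path('hello-view/', HelloApiView.as_view()),
#       path('login/', UserLoginApiView.as_view()),
#       path('', include(router.urls)),
#   ]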
| 33.618321
| 77
| 0.665758
|
f247e0dd4db4ee188b1daf3e72227ac2057daa44
| 25,841
|
py
|
Python
|
tests/test_tasks.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
tests/test_tasks.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
tests/test_tasks.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
__copyright__ = "Copyright (C) 2018 Dong Zhuang"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pytest
from django.utils.timezone import now, timedelta
from django.test import TestCase, override_settings
from course import models
from course.tasks import (
expire_in_progress_sessions,
finish_in_progress_sessions,
regrade_flow_sessions,
recalculate_ended_sessions,
purge_page_view_data,
)
from tests.base_test_mixins import SingleCourseTestMixin, TwoCoursePageTestMixin
from tests.test_flow.test_purge_page_view_data import (
PURGE_VIEW_TWO_COURSE_SETUP_LIST)
from tests import factories
from tests.utils import mock
from tests.constants import QUIZ_FLOW_ID
from pkg_resources import parse_version
import celery
is_celery_4_or_higher = parse_version(celery.__version__) >= parse_version("4.0.0")
class TaskTestMixin:
"""
This test is actually testing without celery dependency.
"""
def setUp(self):
super().setUp()
# Emulates the behavior of AsyncResult
if is_celery_4_or_higher:
override_settings_kwargs = {"task_always_eager": True}
else:
override_settings_kwargs = {
"CELERY_TASK_ALWAYS_EAGER": True,
"CELERY_EAGER_PROPAGATES_EXCEPTIONS": True,
"BROKER_BACKEND": 'memory'
}
celery_fake_overriding = (
override_settings(**override_settings_kwargs))
celery_fake_overriding.enable()
self.addCleanup(celery_fake_overriding.disable)
update_state_patcher = mock.patch(
"celery.app.task.Task.update_state", side_effect=mock.MagicMock)
self.mock_update_state = update_state_patcher.start()
self.addCleanup(update_state_patcher.stop)
class GradesTasksTestSetUpMixin:
def create_flow_sessions(self, course,
n_participations_per_course=4,
n_in_progress_sessions_per_participation=1,
n_ended_sessions_per_participation=2):
"""
Create multiple flow_sessions with a single gopp
:param course::class:`Course`
:param n_participations_per_course: number of participation created for
each course
:param n_in_progress_sessions_per_participation: number of in_progress
sessions created for each participation
:param n_ended_sessions_per_participation: number of ended sessions
created for each participation
"""
self.gopp = factories.GradingOpportunityFactory(course=course)
participations = factories.ParticipationFactory.create_batch(
n_participations_per_course)
for p in participations:
factories.FlowSessionFactory.create_batch(
size=n_in_progress_sessions_per_participation,
participation=p, in_progress=True)
factories.FlowSessionFactory.create_batch(
size=n_ended_sessions_per_participation,
participation=p,
in_progress=False)
all_sessions = models.FlowSession.objects.all()
self.all_sessions_count = all_sessions.count()
self.in_progress_sessions = list(all_sessions.filter(in_progress=True))
self.ended_sessions = list(all_sessions.filter(in_progress=False))
self.in_progress_sessions_count = len(self.in_progress_sessions)
self.ended_sessions_count = len(self.ended_sessions)
@pytest.mark.slow
class GradesTasksTest(SingleCourseTestMixin, GradesTasksTestSetUpMixin,
TaskTestMixin, TestCase):
def setUp(self):
super().setUp()
self.create_flow_sessions(self.course)
# reset the user_name format sequence
self.addCleanup(factories.UserFactory.reset_sequence)
# {{{ test expire_in_progress_sessions
def test_expire_in_progress_sessions_past_due_only_due_none(self):
# grading_rule.due is None
expire_in_progress_sessions(
self.gopp.course.id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
self.in_progress_sessions_count)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.ended_sessions_count)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_expire_in_progress_sessions_past_due_only_dued(self):
# now_datetime > grading_rule.due
fake_grading_rule = self.get_hacked_session_grading_rule(
due=now() + timedelta(days=1))
with mock.patch("course.flow.get_session_grading_rule") as \
mock_get_grading_rule:
mock_get_grading_rule.return_value = fake_grading_rule
expire_in_progress_sessions(
self.gopp.course.id, self.gopp.flow_id,
rule_tag=None, now_datetime=now()+timedelta(days=3),
past_due_only=True)
# no in_progress sessions
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
0)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.all_sessions_count)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_expire_in_progress_sessions_past_due_only_not_dued(self):
# now_datetime <= grading_rule.due
fake_grading_rule = self.get_hacked_session_grading_rule(
due=now() + timedelta(days=1))
with mock.patch("course.flow.get_session_grading_rule") as \
mock_get_grading_rule:
mock_get_grading_rule.return_value = fake_grading_rule
expire_in_progress_sessions(
self.gopp.course.id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
self.in_progress_sessions_count)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.ended_sessions_count)
self.assertEqual(self.mock_update_state.call_count,
self.in_progress_sessions_count)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_expire_in_progress_sessions_all(self):
expire_in_progress_sessions(
self.gopp.course.id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=False)
# no in_progress sessions
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
0)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.all_sessions_count)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
# }}}
# {{{ test finish_in_progress_sessions
def test_finish_in_progress_sessions_past_due_only_due_none(self):
# grading_rule.due is None
finish_in_progress_sessions(
self.gopp.course_id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True
)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
self.in_progress_sessions_count)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.ended_sessions_count)
self.assertEqual(
models.FlowPageVisitGrade.objects.count(), 0)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_finish_in_progress_sessions_past_due_only_dued(self):
# now_datetime > grading_rule.due
fake_grading_rule = self.get_hacked_session_grading_rule(
due=now() + timedelta(days=1))
with mock.patch("course.flow.get_session_grading_rule") as \
mock_get_grading_rule:
mock_get_grading_rule.return_value = fake_grading_rule
finish_in_progress_sessions(
self.gopp.course_id, self.gopp.flow_id,
rule_tag=None, now_datetime=now()+timedelta(days=3),
past_due_only=True)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
0)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.all_sessions_count)
self.assertEqual(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.ended_sessions).count(),
0)
for ended_session in self.in_progress_sessions:
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session=ended_session).count() > 0)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_finish_in_progress_sessions_past_due_only_not_dued(self):
# now_datetime < grading_rule.due
fake_grading_rule = self.get_hacked_session_grading_rule(
due=now() + timedelta(days=1))
with mock.patch("course.flow.get_session_grading_rule") as \
mock_get_grading_rule:
mock_get_grading_rule.return_value = fake_grading_rule
finish_in_progress_sessions(
self.gopp.course_id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True
)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
self.in_progress_sessions_count)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.ended_sessions_count)
self.assertEqual(models.FlowPageVisitGrade.objects.count(), 0)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
def test_finish_in_progress_sessions_all(self):
finish_in_progress_sessions(
self.gopp.course_id, self.gopp.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=False
)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=True).count(),
0)
self.assertEqual(
models.FlowSession.objects.filter(in_progress=False).count(),
self.all_sessions_count)
self.assertEqual(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.ended_sessions).count(),
0
)
        # each session ended by this operation got page grades
for session in self.in_progress_sessions:
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session=session).count() > 0
)
        # sessions that had already ended did not get page grades
for session in self.ended_sessions:
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session=session).count() == 0
)
self.assertEqual(
self.mock_update_state.call_count, self.in_progress_sessions_count)
# }}}
# {{{ test recalculate_ended_sessions
def test_recalculate_ended_sessions(self):
recalculate_ended_sessions(self.gopp.course_id,
self.gopp.flow_id,
rule_tag=None)
# because we didn't create grades, this operation will create them first
first_round_visit_grade_count = models.FlowPageVisitGrade.objects.count()
self.assertTrue(first_round_visit_grade_count > 0)
self.assertEqual(self.mock_update_state.call_count,
self.ended_sessions_count)
self.mock_update_state.reset_mock()
# second round
recalculate_ended_sessions(self.gopp.course_id,
self.gopp.flow_id,
rule_tag=None)
        # the count of page visit grades won't increase
self.assertEqual(
models.FlowPageVisitGrade.objects.count(), first_round_visit_grade_count
)
self.assertEqual(self.mock_update_state.call_count,
self.ended_sessions_count)
# }}}
# {{{ test regrade_flow_sessions
def test_regrade_not_in_progress_only(self):
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag=None,
inprog_value=False
)
# each previously ended session got page regrades
for session in self.ended_sessions:
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session=session).count() > 0
)
self.assertEqual(self.mock_update_state.call_count,
self.ended_sessions_count)
self.mock_update_state.reset_mock()
first_round_visit_grade_count = models.FlowPageVisitGrade.objects.count()
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag=None,
inprog_value=False
)
# number of visit grades increased
self.assertTrue(models.FlowPageVisitGrade.objects.count()
> first_round_visit_grade_count)
self.assertEqual(self.mock_update_state.call_count,
self.ended_sessions_count)
def test_regrade_in_progress_only(self):
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag=None,
inprog_value=True
)
        # ended sessions should not have page regrades
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.ended_sessions).count() == 0
)
        # in-progress sessions got no page regrades, because no pages were
        # submitted
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.in_progress_sessions).count() == 0
)
self.assertEqual(self.mock_update_state.call_count,
self.in_progress_sessions_count)
def test_regrade_all(self):
        # inprog_value=None means sessions will be regraded regardless of
        # whether they are in progress
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag=None,
inprog_value=None
)
# each ended session got page regrades
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.ended_sessions).count() > 0
)
        # in-progress sessions still got no page regrades
self.assertTrue(
models.FlowPageVisitGrade.objects.filter(
visit__flow_session__in=self.in_progress_sessions).count() == 0
)
def test_regrade_with_access_rules_tag(self):
with mock.patch("course.flow.regrade_session") as mock_regrade:
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag="None exist tag",
inprog_value=None
)
mock_regrade.return_value = None
# no regrade happened
self.assertEqual(mock_regrade.call_count, 0)
first_session = models.FlowSession.objects.first()
first_session.access_rules_tag = "some tag"
first_session.save()
regrade_flow_sessions(self.gopp.course_id,
self.gopp.flow_id,
access_rules_tag="some tag",
inprog_value=None
)
self.assertEqual(mock_regrade.call_count, 1)
self.assertIn(first_session, mock_regrade.call_args[0])
self.assertEqual(self.mock_update_state.call_count, 1)
# }}}
class PurgePageViewDataTaskTestSetUpMixin:
def create_flow_page_visit(self, course,
n_participations_per_course=5,
n_sessions_per_participation=1,
n_null_answer_visits_per_session=5,
n_non_null_answer_visits_per_session=3):
"""
:param course::class:`Course`
:param n_participations_per_course: number of participation created for
each course
:param n_sessions_per_participation: number of session created for
each participation
:param n_null_answer_visits_per_session: number of flowpagevisit, which does
not have an answer, created for each session
:param n_non_null_answer_visits_per_session: number of flowpagevisit, which
has an answer, created for each session
:return::class:`Tuple`: number of all flow_page_visits, number of null
answer flow_page_visits, and number of non-null answer flow_page_visits.
"""
my_course = factories.CourseFactory(identifier=course.identifier)
participations = factories.ParticipationFactory.create_batch(
size=n_participations_per_course, course=my_course)
for participation in participations:
flow_sessions = factories.FlowSessionFactory.create_batch(
size=n_sessions_per_participation, participation=participation)
for flow_session in flow_sessions:
                null_answer_fpds = factories.FlowPageDataFactory.create_batch(
                    size=n_null_answer_visits_per_session, flow_session=flow_session
                )
                for fpd in null_answer_fpds:
                    factories.FlowPageVisitFactory.create(page_data=fpd)
                non_null_answer_fpds = factories.FlowPageDataFactory.create_batch(
                    size=n_non_null_answer_visits_per_session,
                    flow_session=flow_session
                )
                for fpd in non_null_answer_fpds:
factories.FlowPageVisitFactory.create(
page_data=fpd,
answer={"answer": "abcd"})
n_null_answer_fpv = (
n_participations_per_course
* n_sessions_per_participation
* n_null_answer_visits_per_session)
n_non_null_answer_fpv = (
n_participations_per_course
* n_sessions_per_participation
* n_non_null_answer_visits_per_session)
n_all_fpv = n_null_answer_fpv + n_non_null_answer_fpv
return n_all_fpv, n_null_answer_fpv, n_non_null_answer_fpv
@pytest.mark.slow
class PurgePageViewDataTaskTest(TwoCoursePageTestMixin,
PurgePageViewDataTaskTestSetUpMixin, TaskTestMixin,
TestCase):
""" test purge_page_view_data
"""
courses_setup_list = PURGE_VIEW_TWO_COURSE_SETUP_LIST
def setUp(self):
super().setUp()
# {{{ create flow page visits
        # all 40, null answer 25, answered 15
result1 = self.create_flow_page_visit(self.course1)
(self.course1_n_all_fpv, self.course1_n_null_answer_fpv,
self.course1_n_non_null_answer_fpv) = result1
        # all 30, null answer 24, answered 6
result2 = self.create_flow_page_visit(
self.course2,
n_participations_per_course=3, n_sessions_per_participation=2,
n_null_answer_visits_per_session=4,
n_non_null_answer_visits_per_session=1)
(self.course2_n_all_fpv, self.course2_n_null_answer_fpv,
self.course2_n_non_null_answer_fpv) = result2
# }}}
# reset the user_name format sequence
self.addCleanup(factories.UserFactory.reset_sequence)
def test_purge_page_view_data(self):
purge_page_view_data(self.course1.pk)
# Expected counts of course 1
self.assertEqual(
models.FlowPageVisit.objects.filter(
flow_session__course=self.course1).count(),
self.course1_n_non_null_answer_fpv
)
self.assertEqual(
models.FlowPageVisit.objects.filter(
flow_session__course=self.course1,
answer__isnull=True,
).count(),
0
)
# Counts for course 2 are not affected
self.assertEqual(
models.FlowPageVisit.objects.filter(
flow_session__course=self.course2).count(),
self.course2_n_all_fpv
)
self.assertEqual(
models.FlowPageVisit.objects.filter(
flow_session__course=self.course2,
answer__isnull=True,
).count(),
self.course2_n_null_answer_fpv
)
self.assertEqual(self.mock_update_state.call_count, 0)
# }}}
@pytest.mark.slow
class TasksTestsWithCeleryDependency(SingleCourseTestMixin, TestCase):
"""
    These tests involve Celery. However, because Django's test SQLite database is
    in-memory and Celery runs in a separate process, it is fundamentally impossible
    for Celery and Django to share a test database. Here we only test that task
    serialization works; the results are checked in other tests with
    settings.CELERY_TASK_ALWAYS_EAGER=True.
"""
flow_id = QUIZ_FLOW_ID
def test_expire_in_progress_sessions(self):
with mock.patch("course.tasks.Course.objects.get")\
as mock_course_object_get:
# This is to avoid errors
mock_course_object_get.return_value = self.course
expire_in_progress_sessions(
self.course.pk, self.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True
)
mock_course_object_get.assert_called_once_with(id=self.course.pk)
def test_finish_in_progress_sessions(self):
with mock.patch("course.tasks.Course.objects.get")\
as mock_course_object_get:
# This is to avoid errors
mock_course_object_get.return_value = self.course
finish_in_progress_sessions(
self.course.pk, self.flow_id,
rule_tag=None, now_datetime=now(),
past_due_only=True
)
mock_course_object_get.assert_called_once_with(id=self.course.pk)
def test_regrade_flow_sessions(self):
with mock.patch("course.tasks.Course.objects.get")\
as mock_course_object_get:
# This is to avoid errors
mock_course_object_get.return_value = self.course
regrade_flow_sessions(
self.course.pk, self.flow_id,
access_rules_tag=None, inprog_value=None
)
mock_course_object_get.assert_called_once_with(id=self.course.pk)
def test_recalculate_ended_sessions(self):
with mock.patch("course.tasks.Course.objects.get")\
as mock_course_object_get:
# This is to avoid errors
mock_course_object_get.return_value = self.course
recalculate_ended_sessions(
self.course.pk, self.flow_id, rule_tag=None
)
mock_course_object_get.assert_called_once_with(id=self.course.pk)
def test_purge_page_view_data(self):
with mock.patch("course.tasks.Course.objects.get") \
as mock_course_object_get:
# This is to avoid errors
mock_course_object_get.return_value = self.course
purge_page_view_data(self.course.pk)
mock_course_object_get.assert_called_once_with(id=self.course.pk)
# vim: foldmethod=marker
| 38.858647
| 84
| 0.641384
|
e6155a79446590b7d71e84c8e4d7cabd37af6f56
| 1,187
|
py
|
Python
|
api/src/lib/twitter/users/user_lookup/params.py
|
kagemeka/twitter-api-python
|
c78e2b16177126d7808b90d09d6f565bd89dce5b
|
[
"MIT"
] | null | null | null |
api/src/lib/twitter/users/user_lookup/params.py
|
kagemeka/twitter-api-python
|
c78e2b16177126d7808b90d09d6f565bd89dce5b
|
[
"MIT"
] | null | null | null |
api/src/lib/twitter/users/user_lookup/params.py
|
kagemeka/twitter-api-python
|
c78e2b16177126d7808b90d09d6f565bd89dce5b
|
[
"MIT"
] | null | null | null |
import dataclasses
import typing
from .. import Expansions
from ...fields import TweetFields, UserFields
from ... import ConvertParams
@dataclasses.dataclass
class Params():
expansions: Expansions = Expansions()
tweet_fields: TweetFields = TweetFields()
user_fields: UserFields = UserFields()
def to_dict(self) -> dict:
convert = ConvertParams()
params = (
self.expansions,
self.tweet_fields,
self.user_fields,
)
return {
p.name: p.string
for p in map(convert, params)
}
@dataclasses.dataclass
class ByIdParams(Params): ...
@dataclasses.dataclass
class ByIdsParams(Params):
ids: typing.List[str] = dataclasses.field(
default_factory=list,
)
def to_dict(self) -> dict:
return {
'ids': ','.join(self.ids),
**super().to_dict(),
}
@dataclasses.dataclass
class ByUsernameParams(Params): ...
@dataclasses.dataclass
class ByUsernamesParams(Params):
usernames: typing.List[str] = dataclasses.field(
default_factory=list,
)
def to_dict(self) -> dict:
print(','.join(self.usernames))
return {
'usernames': ','.join(self.usernames),
**super().to_dict(),
}
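# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, hypothetical
# example of serializing these parameter dataclasses into request params.
# The ids and usernames below are made-up placeholder values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    by_ids = ByIdsParams(ids=['2244994945', '783214'])
    print(by_ids.to_dict())  # e.g. {'ids': '2244994945,783214', ...field params...}
    by_usernames = ByUsernamesParams(usernames=['TwitterDev', 'TwitterAPI'])
    print(by_usernames.to_dict())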
| 17.716418
| 50
| 0.660489
|
adbbb20731e8b671460808372648cbb6abe68e90
| 194
|
py
|
Python
|
bgpy/__init__.py
|
munterfinger/background-py
|
3ebfe95855b6cc1b36f93165ad3ab14bf566a6d7
|
[
"MIT"
] | null | null | null |
bgpy/__init__.py
|
munterfinger/background-py
|
3ebfe95855b6cc1b36f93165ad3ab14bf566a6d7
|
[
"MIT"
] | 42
|
2021-02-08T18:00:48.000Z
|
2021-10-01T17:17:11.000Z
|
bgpy/__init__.py
|
munterfinger/bgpy
|
3ebfe95855b6cc1b36f93165ad3ab14bf566a6d7
|
[
"MIT"
] | null | null | null |
"""Top-level package for bgpy."""
from .client import Client
from .server import Server, respond
from .core.token import token_create
__all__ = ["Client", "Server", "respond", "token_create"]
| 24.25
| 57
| 0.731959
|
6edeed24cf9495ea698593379228040d2e39d8c3
| 6,613
|
py
|
Python
|
kollet/kollet.py
|
kollet-io/kollet-python-api-wrapper
|
3064c372ddc1b6391e16b3ffd21890249ca5c6c7
|
[
"MIT"
] | null | null | null |
kollet/kollet.py
|
kollet-io/kollet-python-api-wrapper
|
3064c372ddc1b6391e16b3ffd21890249ca5c6c7
|
[
"MIT"
] | null | null | null |
kollet/kollet.py
|
kollet-io/kollet-python-api-wrapper
|
3064c372ddc1b6391e16b3ffd21890249ca5c6c7
|
[
"MIT"
] | null | null | null |
"""
Kollet Merchant API Wrapper
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Python API wrapper for the Kollet Merchant API
:copyright: (c) 2021 by Kollet.io.
:license: MIT, see LICENSE for more details.
"""
__author__ = "Kollet.io"
__version__ = "0.0.2"
import requests
from .errors import KolletErrors
class Kollet(object):
def __init__(self, api_key: str):
"""
Instantiates an instance of :class: `Kollet`.
:param api_key: `str`, API Key that authenticates requests to the Kollet Merchant API
"""
self.__base_url: str = "https://api.kollet.io/v1"
self.__api_key = api_key
self.__endpoints = { "currencies": "currencies", "address": "address/create", "balance": "balance",
"estimate_fee": "estimateFee", "send": "send"}
def __request(self, endpoint: str, payload: dict):
url = f'{self.__base_url}/{endpoint}'
headers = {'Content-Type': 'application/json'}
response = requests.request("POST", url, json=payload, headers=headers)
if response.status_code == 200:
return response.json()
raise KolletErrors("Bad Request")
def get_currencies(self):
"""
Get all the cryptocurrencies supported by Kollet.
:return: JSON response
:error: raises error :class: `KolletErrors` when response success is false or status code is 400
"""
endpoint = self.__endpoints.get("currencies")
payload = {"accessToken": self.__api_key}
response = self.__request(endpoint, payload)
if response["success"]:
return response
raise KolletErrors(response["message"])
def create_address(self, currency: str, label: str, metadata: dict = {}):
"""
Generate a payment address. This is the address that customers pay to.
Parameters
----------
        :param currency: `str`, the code of the supported cryptocurrency. Visit the supported
            cryptocurrency section to view the currencies supported and their various codes, e.g. BTC
:param label: `str`, a unique id that is linked to the address. You can use this id to identify
different users or payment addresses.
:param metadata: `dict` An optional field where you can store a JSON object. This field is attached
to all webhooks when we are notifying you of new changes to the status of a payment.
:return: JSON response
:error: raises error :class: `KolletErrors` when response success is false or status code is 400
"""
endpoint = self.__endpoints.get("address")
payload = {
"accessToken": self.__api_key,
"currency": currency,
"label": label,
"metadata": metadata
}
response = self.__request(endpoint, payload)
if response["success"]:
return response
raise KolletErrors(response["message"])
def get_balance(self, currency: str):
"""
Get balance of a particular cryptocurrency.
Parameters
----------
        :param currency: `str`, the code of the supported cryptocurrency. Visit the supported
            cryptocurrency section to view the currencies supported and their various codes, e.g. BTC
:return: JSON response
:error: raises error :class: `KolletErrors` when response success is false or status code is 400
"""
endpoint = self.__endpoints.get("balance")
payload = {
"accessToken": self.__api_key,
"currency": currency
}
response = self.__request(endpoint, payload)
if response["success"]:
return response
raise KolletErrors(response["message"])
def estimate_network_fee(self, amount:str, currency: str, duration: str):
"""
Get an estimated fee for sending funds on a particular cryptocurrency network.
Parameters
----------
:param amount: `str`, the amount of cryptocurrency units you want to send out.
        :param currency: `str`, the code of the supported cryptocurrency. Visit the supported
            cryptocurrency section to view the currencies supported and their various codes, e.g. BTC
        :param duration: `str`, the duration code. The duration code determines how much you pay in
            fees and how fast your recipient receives their funds.
            Refer to https://docs.kollet.io/docs/kollet-merchant/docs/2.0.Network-Fee-And-Duration.md for the different durations.
:return: JSON response
:error: raises error :class: `KolletErrors` when response success is false or status code is 400
"""
endpoint = self.__endpoints.get("estimate_fee")
payload = {
"accessToken": self.__api_key,
"amount": amount,
"currency": currency,
"duration": duration
}
response = self.__request(endpoint, payload)
if response["success"]:
return response
raise KolletErrors(response["message"])
def send_coins(self, amount:str, currency: str, duration: str, recipient: str):
"""
        Send an amount of cryptocurrency to a recipient address on a particular cryptocurrency network.
Parameters
----------
:param amount: `str`, the amount of cryptocurrency units you want to send out.
        :param currency: `str`, the code of the supported cryptocurrency. Visit the supported
            cryptocurrency section to view the currencies supported and their various codes, e.g. BTC
        :param duration: `str`, the duration code. The duration code determines how much you pay in
            fees and how fast your recipient receives their funds.
            Refer to https://docs.kollet.io/docs/kollet-merchant/docs/2.0.Network-Fee-And-Duration.md for the different durations.
:param recipient: `str`, this is the destination. The receiving wallet address/recipient.
:return: JSON response
:error: raises error :class: `KolletErrors` when response success is false or status code is 400
"""
endpoint = self.__endpoints.get("send")
payload = {
"accessToken": self.__api_key,
"amount": amount,
"currency": currency,
"duration": duration,
"recipient": recipient
}
response = self.__request(endpoint, payload)
if response["success"]:
return response
raise KolletErrors(response["message"])
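# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, hypothetical
# example of calling the wrapper. The API key, label, amount, duration code
# and recipient-facing values are placeholders; every call hits the live
# Kollet API and raises KolletErrors on failure, so this is illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    client = Kollet(api_key="YOUR_ACCESS_TOKEN")
    # List the supported cryptocurrencies.
    print(client.get_currencies())
    # Create a BTC payment address tied to a user id of your choosing.
    print(client.create_address(currency="BTC", label="user-42"))
    # Check the BTC balance and estimate a network fee before sending.
    print(client.get_balance(currency="BTC"))
    # "economy" is a placeholder duration code; see the linked Kollet docs for real values.
    print(client.estimate_network_fee(amount="0.001", currency="BTC", duration="economy"))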
| 38.447674
| 126
| 0.62861
|
7889aaa3daa93c9609596e7894d58998fa831b65
| 1,974
|
py
|
Python
|
nobos_commons/utils/human_surveyor.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | 2
|
2020-06-03T16:28:44.000Z
|
2020-10-10T03:07:23.000Z
|
nobos_commons/utils/human_surveyor.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | null | null | null |
nobos_commons/utils/human_surveyor.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | 4
|
2020-10-10T03:07:25.000Z
|
2021-09-30T01:11:02.000Z
|
from nobos_commons.data_structures.skeletons.limb_2d import Limb2D
from nobos_commons.data_structures.skeletons.skeleton_stickman_limbs import SkeletonStickmanLimbs
from nobos_commons.utils.limb_helper import get_limb_length
class HumanSurveyor(object):
def get_human_height(self, limbs: SkeletonStickmanLimbs) -> float:
bone_name, bone = self._get_bone_for_measurement(limbs)
bone_length = get_limb_length(bone)
return self._bone_measurements_to_person_heights[bone_name](bone_length)
def _get_bone_for_measurement(self, limbs: SkeletonStickmanLimbs) -> (str, Limb2D):
"""
        Returns the femur, tibia, humerus or radius limb, preferred in that order, depending on which bones were recognized.
"""
if limbs.right_hip_to_right_knee is not None:
return "femur", limbs.right_hip_to_right_knee
if limbs.left_hip_to_left_knee is not None:
return "femur", limbs.left_hip_to_left_knee
if limbs.right_knee_to_right_ankle is not None:
return "tibia", limbs.right_knee_to_right_ankle
if limbs.left_knee_to_left_ankle is not None:
return "tibia", limbs.left_knee_to_left_ankle
if limbs.right_shoulder_to_right_elbow is not None:
return "humerus", limbs.right_shoulder_to_right_elbow
if limbs.left_shoulder_to_left_elbow is not None:
return "humerus", limbs.left_shoulder_to_left_elbow
if limbs.right_elbow_to_right_wrist is not None:
return "femur", limbs.right_elbow_to_right_wrist
if limbs.left_elbow_to_left_wrist is not None:
return "femur", limbs.left_elbow_to_left_wrist
return None, None
_bone_measurements_to_person_heights = {
"femur": lambda bone_length: 4 * bone_length,
"tibia": lambda bone_length: 4.44 * bone_length,
"humerus": lambda bone_length: 4.7 * bone_length,
"radius": lambda bone_length: 6.66 * bone_length
}
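# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a hypothetical helper,
# assuming `limbs` is a populated SkeletonStickmanLimbs instance produced by a
# pose estimator. The estimated height is in the same units as the limb
# coordinates (typically pixels).
# ---------------------------------------------------------------------------
def estimate_height(limbs: SkeletonStickmanLimbs) -> float:
    # Picks the first recognized bone (femur > tibia > humerus > radius) and
    # scales its length by the corresponding anatomical ratio.
    return HumanSurveyor().get_human_height(limbs)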
| 44.863636
| 108
| 0.720871
|
cef942e7eca1dcc44cb71fd0301284c26b14b06c
| 2,903
|
py
|
Python
|
data_loader.py
|
srviest/CharCNN_PyTorch
|
f269894ad40ef4d716b76b24db20379f102cae37
|
[
"Apache-2.0"
] | 132
|
2018-05-12T18:04:28.000Z
|
2022-03-21T13:20:16.000Z
|
data_loader.py
|
srviest/CharCNN_pytorch
|
f269894ad40ef4d716b76b24db20379f102cae37
|
[
"Apache-2.0"
] | 5
|
2018-10-31T23:19:09.000Z
|
2020-06-07T18:32:29.000Z
|
data_loader.py
|
srviest/CharCNN_pytorch
|
f269894ad40ef4d716b76b24db20379f102cae37
|
[
"Apache-2.0"
] | 38
|
2018-05-15T02:48:52.000Z
|
2021-11-09T22:04:32.000Z
|
#!/usr/bin/env python3
from torch.utils.data import DataLoader, Dataset
import torch.autograd as autograd
import torch
import json
import csv
class AGNEWs(Dataset):
def __init__(self, label_data_path, alphabet_path, l0 = 1014):
"""Create AG's News dataset object.
Arguments:
label_data_path: The path of label and data file in csv.
l0: max length of a sample.
alphabet_path: The path of alphabet json file.
"""
self.label_data_path = label_data_path
self.l0 = l0
# read alphabet
self.loadAlphabet(alphabet_path)
self.load(label_data_path)
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
X = self.oneHotEncode(idx)
y = self.y[idx]
return X, y
def loadAlphabet(self, alphabet_path):
with open(alphabet_path) as f:
self.alphabet = ''.join(json.load(f))
def load(self, label_data_path, lowercase = True):
self.label = []
self.data = []
with open(label_data_path, 'r') as f:
rdr = csv.reader(f, delimiter=',', quotechar='"')
# num_samples = sum(1 for row in rdr)
for index, row in enumerate(rdr):
self.label.append(int(row[0]))
txt = ' '.join(row[1:])
if lowercase:
txt = txt.lower()
self.data.append(txt)
self.y = torch.LongTensor(self.label)
def oneHotEncode(self, idx):
        # X shape: (len(alphabet), l0), a one-hot encoding of a single sample
X = torch.zeros(len(self.alphabet), self.l0)
sequence = self.data[idx]
for index_char, char in enumerate(sequence[::-1]):
if self.char2Index(char)!=-1:
X[self.char2Index(char)][index_char] = 1.0
return X
def char2Index(self, character):
return self.alphabet.find(character)
def getClassWeight(self):
num_samples = self.__len__()
label_set = set(self.label)
num_class = [self.label.count(c) for c in label_set]
class_weight = [num_samples/float(self.label.count(c)) for c in label_set]
return class_weight, num_class
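# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a hypothetical example of
# feeding the weights returned by getClassWeight() into a weighted
# cross-entropy loss. Note that the labels stored in AG's News CSVs are
# 1-based, so they would need to be shifted to 0-based before being used as
# targets for CrossEntropyLoss.
# ---------------------------------------------------------------------------
def build_weighted_loss(dataset: AGNEWs) -> torch.nn.CrossEntropyLoss:
    class_weight, _num_class = dataset.getClassWeight()
    # Under-represented classes receive larger weights.
    return torch.nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weight))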
if __name__ == '__main__':
label_data_path = 'data/ag_news_csv/test.csv'
alphabet_path = 'alphabet.json'
train_dataset = AGNEWs(label_data_path, alphabet_path)
train_loader = DataLoader(train_dataset, batch_size=64, num_workers=4, drop_last=False)
# size = 0
for i_batch, sample_batched in enumerate(train_loader):
if i_batch == 0:
print(sample_batched[0][0][0].shape)
# print(sample_batched)
# len(i_batch)
# print(sample_batched['label'].size())
# inputs = sample_batched['data']
# print(inputs.size())
# print('type(target): ', target)
| 31.554348
| 91
| 0.588012
|
1f4f5ab9e6b568caa74c1001de9942c7910b7bdb
| 3,519
|
py
|
Python
|
pet_promotion/projects/settings.py
|
minsgy/9th_Pet_Promotion_Backend
|
ecd18cb7bca635492f7bfc4815d43493c32d76d7
|
[
"MIT"
] | null | null | null |
pet_promotion/projects/settings.py
|
minsgy/9th_Pet_Promotion_Backend
|
ecd18cb7bca635492f7bfc4815d43493c32d76d7
|
[
"MIT"
] | null | null | null |
pet_promotion/projects/settings.py
|
minsgy/9th_Pet_Promotion_Backend
|
ecd18cb7bca635492f7bfc4815d43493c32d76d7
|
[
"MIT"
] | null | null | null |
"""
Django settings for projects project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fz32%-&@o$^g7hatkme0_f^8k-&rkg#2a_^k68$y26ycrk7!ch'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # App registration
'user.apps.UserConfig',
'board.apps.BoardConfig',
#DRF Settings
'rest_framework',
'corsheaders',
'knox'
]
AUTH_USER_MODEL = 'user.User'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ("knox.auth.TokenAuthentication",),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000'
]
ROOT_URLCONF = 'projects.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'projects.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| 24.4375
| 91
| 0.693947
|
9fbd2fb37571c4349b968ea258da32ba70154d3f
| 286
|
py
|
Python
|
hseling_api_nauchpop/setup.py
|
Neundert/hseling-api-nauchpop
|
01ea3027f072577dbb5243291bf91be5bd059336
|
[
"MIT"
] | null | null | null |
hseling_api_nauchpop/setup.py
|
Neundert/hseling-api-nauchpop
|
01ea3027f072577dbb5243291bf91be5bd059336
|
[
"MIT"
] | 1
|
2018-12-16T17:45:17.000Z
|
2018-12-16T19:19:03.000Z
|
hseling_api_nauchpop/setup.py
|
Neundert/hseling-api-nauchpop
|
01ea3027f072577dbb5243291bf91be5bd059336
|
[
"MIT"
] | 3
|
2018-12-08T13:31:56.000Z
|
2022-01-20T05:52:35.000Z
|
from setuptools import setup, find_packages
setup(
name='',
version='0.1',
description='A description.',
packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
package_data={'': ['license.txt']},
include_package_data=True,
install_requires=[],
)
| 23.833333
| 69
| 0.653846
|
51f84b2cdce0988350db8b2a5f54e7b5ff439dea
| 6,520
|
py
|
Python
|
tests/modules/users/test_repository.py
|
jorge4larcon/fastproject
|
0a091940c22e27d813f38819027081314d334c22
|
[
"MIT"
] | 12
|
2022-02-11T04:03:17.000Z
|
2022-02-15T03:34:50.000Z
|
tests/modules/users/test_repository.py
|
jorge4larcon/fastproject
|
0a091940c22e27d813f38819027081314d334c22
|
[
"MIT"
] | null | null | null |
tests/modules/users/test_repository.py
|
jorge4larcon/fastproject
|
0a091940c22e27d813f38819027081314d334c22
|
[
"MIT"
] | 1
|
2022-02-13T02:55:50.000Z
|
2022-02-13T02:55:50.000Z
|
"""Tests for module modules.users.password_validators."""
import datetime
import uuid
import asyncpg
import pytest
from fastproject.modules.users import exceptions, repository
class MockPoolAcquireContext:
pass
@pytest.mark.asyncio
async def test_insert_user(monkeypatch):
async def mock_insert_user(conn, **kwargs):
return {
"uuser_id": uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"),
"username": kwargs["username"],
"email": kwargs["email"],
"first_name": kwargs["first_name"],
"last_name": kwargs["last_name"],
"password": kwargs["password"],
"is_superuser": kwargs["is_superuser"],
"is_staff": kwargs["is_staff"],
"is_active": kwargs["is_active"],
"date_joined": kwargs["date_joined"],
"last_login": kwargs["last_login"],
}
monkeypatch.setattr(repository._queries, "insert_user", mock_insert_user)
inserted = await repository.insert_user(
username="soulofcinder",
email="soc@kotff.com",
first_name="Soul",
last_name="Of Cinder",
password="averysecrethash",
is_superuser=True,
is_staff=True,
is_active=True,
date_joined=datetime.datetime(1999, 1, 22),
last_login=datetime.datetime(2002, 11, 26),
conn=MockPoolAcquireContext(),
)
assert type(inserted) is repository.User
async def mock_insert_user(conn, **kwargs):
raise asyncpg.UniqueViolationError("username")
monkeypatch.setattr(repository._queries, "insert_user", mock_insert_user)
with pytest.raises(exceptions.UsernameAlreadyExistsError):
await repository.insert_user(conn=MockPoolAcquireContext())
async def mock_insert_user(conn, **kwargs):
raise asyncpg.UniqueViolationError("email")
monkeypatch.setattr(repository._queries, "insert_user", mock_insert_user)
with pytest.raises(exceptions.EmailAlreadyExistsError):
await repository.insert_user(conn=MockPoolAcquireContext())
@pytest.mark.asyncio
async def test_get_user_by_id(monkeypatch):
async def mock_get_user_by_id(conn, uuser_id):
if uuser_id == uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"):
return {
"uuser_id": uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"),
"username": "soulofcinder",
"email": "soc@kotff.com",
"first_name": "Soul",
"last_name": "Of Cinder",
"password": "averysecrethash",
"is_superuser": True,
"is_staff": True,
"is_active": True,
"date_joined": datetime.datetime(1999, 1, 22),
"last_login": datetime.datetime(2002, 11, 26),
}
return None
monkeypatch.setattr(repository._queries, "get_user_by_id", mock_get_user_by_id)
searched = await repository.get_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"), conn=MockPoolAcquireContext()
)
assert type(searched) is repository.User
searched = await repository.get_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919aE"), conn=MockPoolAcquireContext()
)
assert searched is None
@pytest.mark.asyncio
async def test_update_user_by_id(monkeypatch):
async def mock_update_user_by_id(conn, uuser_id, **kwargs):
if uuser_id == uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"):
return {
"uuser_id": uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"),
"username": "soulofcinder",
"email": "soc@kotff.com",
"first_name": "Soul",
"last_name": "Of Cinder",
"password": "averysecrethash",
"is_superuser": True,
"is_staff": True,
"is_active": True,
"date_joined": datetime.datetime(1999, 1, 22),
"last_login": datetime.datetime(2002, 11, 26),
}
return None
monkeypatch.setattr(
repository._queries, "update_user_by_id", mock_update_user_by_id
)
updated = await repository.update_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee")
)
assert type(updated) is repository.User
updated = await repository.update_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919AA")
)
assert updated is None
async def mock_update_user_by_id(conn, uuser_id, **kwargs):
raise asyncpg.UniqueViolationError("username")
monkeypatch.setattr(
repository._queries, "update_user_by_id", mock_update_user_by_id
)
with pytest.raises(exceptions.UsernameAlreadyExistsError):
await repository.update_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee")
)
async def mock_update_user_by_id(conn, uuser_id, **kwargs):
raise asyncpg.UniqueViolationError("email")
monkeypatch.setattr(
repository._queries, "update_user_by_id", mock_update_user_by_id
)
with pytest.raises(exceptions.EmailAlreadyExistsError):
await repository.update_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee")
)
@pytest.mark.asyncio
async def test_delete_user_by_id(monkeypatch):
async def mock_delete_user_by_id(conn, uuser_id):
if uuser_id == uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"):
return {
"uuser_id": uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"),
"username": "soulofcinder",
"email": "soc@kotff.com",
"first_name": "Soul",
"last_name": "Of Cinder",
"password": "averysecrethash",
"is_superuser": True,
"is_staff": True,
"is_active": True,
"date_joined": datetime.datetime(1999, 1, 22),
"last_login": datetime.datetime(2002, 11, 26),
}
return None
monkeypatch.setattr(
repository._queries, "delete_user_by_id", mock_delete_user_by_id
)
deleted = await repository.delete_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919ee"), conn=MockPoolAcquireContext()
)
assert type(deleted) is repository.User
deleted = await repository.delete_user_by_id(
uuid.UUID("de623351-1398-4a83-98c5-91a34f5919aE"), conn=MockPoolAcquireContext()
)
assert deleted is None
| 36.629213
| 88
| 0.636196
|
6150c7ea62b1a183579ea526b889463ecf5173e7
| 17,475
|
py
|
Python
|
src/prefect/cli/agent.py
|
zmac12/prefect
|
7fe55a83f275a01d95268ff9e4bd5f5b349728e1
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/cli/agent.py
|
zmac12/prefect
|
7fe55a83f275a01d95268ff9e4bd5f5b349728e1
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/cli/agent.py
|
zmac12/prefect
|
7fe55a83f275a01d95268ff9e4bd5f5b349728e1
|
[
"Apache-2.0"
] | null | null | null |
import click
from prefect import config
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.serialization import from_qualified_name
_agents = {
"fargate": "prefect.agent.fargate.FargateAgent",
"docker": "prefect.agent.docker.DockerAgent",
"kubernetes": "prefect.agent.kubernetes.KubernetesAgent",
"local": "prefect.agent.local.LocalAgent",
}
@click.group(hidden=True)
def agent():
"""
Manage Prefect agents.
\b
Usage:
$ prefect agent [COMMAND]
\b
Arguments:
start Start a Prefect agent
install Output platform-specific agent installation configs
\b
Examples:
$ prefect agent start
...agent begins running in process...
\b
$ prefect agent start kubernetes --token MY_TOKEN
...agent begins running in process...
\b
$ prefect agent install kubernetes --token MY_TOKEN --namespace metrics
...k8s yaml output...
"""
@agent.command(
hidden=True,
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@click.argument("agent-option", default="local")
@click.option(
"--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True
)
@click.option("--api", "-a", required=False, help="A Prefect API URL.", hidden=True)
@click.option("--agent-config-id", required=False, help="An agent ID", hidden=True)
@click.option(
"--name",
"-n",
required=False,
help="A name to use for the agent",
hidden=True,
default=None,
)
@click.option(
"--verbose", "-v", is_flag=True, help="Enable verbose agent logs.", hidden=True
)
@click.option(
"--label",
"-l",
multiple=True,
help="Labels the agent will use to query for flow runs.",
hidden=True,
)
@click.option(
"--env",
"-e",
multiple=True,
help="Environment variables to set on each submitted flow run.",
hidden=True,
)
@click.option(
"--max-polls",
required=False,
help="Maximum number of polls for the agent",
hidden=True,
type=int,
)
@click.option(
"--agent-address",
required=False,
help="Address to serve internal api server at. Defaults to no server.",
hidden=True,
type=str,
default="",
)
@click.option(
"--namespace",
required=False,
help="Kubernetes namespace to create jobs.",
hidden=True,
)
@click.option(
"--job-template",
required=False,
help="Path to a kubernetes job template to use instead of the default.",
hidden=True,
)
@click.option(
"--import-path",
"-p",
multiple=True,
help="Import paths the local agent will add to all flow runs.",
hidden=True,
)
@click.option(
"--show-flow-logs",
"-f",
help="Display logging output from flows run by the agent.",
hidden=True,
is_flag=True,
)
@click.option("--no-pull", is_flag=True, help="Pull images flag.", hidden=True)
@click.option(
"--no-cloud-logs",
is_flag=True,
help="Turn off logging for all flows run through this agent.",
hidden=True,
)
@click.option("--base-url", "-b", help="Docker daemon base URL.", hidden=True)
@click.option(
"--volume",
multiple=True,
help="Host paths for Docker bind mount volumes attached to each Flow runtime container.",
hidden=True,
)
@click.option(
"--network",
help="Add containers to an existing docker network",
hidden=True,
)
@click.option(
"--no-docker-interface",
is_flag=True,
help="Disable presence of a Docker interface.",
hidden=True,
)
@click.pass_context
def start(
ctx,
agent_option,
token,
api,
agent_config_id,
name,
verbose,
label,
env,
namespace,
job_template,
no_pull,
no_cloud_logs,
base_url,
import_path,
show_flow_logs,
volume,
network,
no_docker_interface,
max_polls,
agent_address,
):
"""
Start an agent.
\b
Arguments:
agent-option TEXT The name of an agent to start (e.g. `docker`, `kubernetes`,
`local`, `fargate`). Defaults to `local`
\b
Options:
--token, -t TEXT A Prefect Cloud API token with RUNNER scope
--api, -a TEXT A Prefect API URL
        --agent-config-id TEXT      An agent ID to link this agent instance with
--name, -n TEXT A name to use for the agent
--verbose, -v Enable verbose agent DEBUG logs
Defaults to INFO level logging
--label, -l TEXT Labels the agent will use to query for flow runs
Multiple values supported e.g. `-l label1 -l label2`
--env, -e TEXT Environment variables to set on each submitted flow
run.
Note that equal signs in environment variable values
are not currently supported from the CLI. Multiple
values supported.
e.g. `-e AUTH=token -e PKG_SETTING=true`
--max-polls INT Maximum number of times the agent should poll the
Prefect API for flow runs. Will run forever if not
specified.
--no-cloud-logs Turn off logging to the Prefect API for all flow runs
Defaults to `False`
        --agent-address TEXT        The address to serve the internal api at. Currently this
is just health checks for use by an orchestration layer
(e.g. kubernetes). Leave blank for no api server (default).
\b
Local Agent:
--import-path, -p TEXT Import paths which will be provided to each Flow's
runtime environment. Used for Flows which might
import from scripts or local packages. Multiple values
supported.
e.g. `-p /root/my_scripts -p /utilities`
--show-flow-logs, -f Display logging output from flows run by the agent
(available for Local and Docker agents only)
\b
Docker Agent:
--base-url, -b TEXT A Docker daemon host URL for a DockerAgent
        --no-pull                   Disable pulling images for a DockerAgent
                                    Images are pulled by default if this flag is not provided
--volume TEXT Host paths for Docker bind mount volumes attached to
each Flow runtime container. Multiple values supported.
e.g. `--volume /some/path`
--network TEXT Add containers to an existing docker network
--no-docker-interface Disable the check of a Docker interface on this machine.
Note: This is mostly relevant for some Docker-in-Docker
setups that users may be running their agent with.
\b
Kubernetes Agent:
--namespace TEXT A Kubernetes namespace to create Prefect jobs in
Defaults to env var `NAMESPACE` or `default`
--job-template TEXT Path to a job template to use instead of the default.
\b
Fargate Agent Options:
Any of the configuration options outlined in the docs can be provided here
https://docs.prefect.io/orchestration/agents/fargate.html#configuration
"""
    # Parse any extra --key=value options from the command line into kwargs
kwargs = dict()
for item in ctx.args:
item = item.replace("--", "")
kwargs.update([item.split("=")])
tmp_config = {
"cloud.agent.auth_token": token or config.cloud.agent.auth_token,
}
if verbose:
tmp_config["cloud.agent.level"] = "DEBUG"
if api:
tmp_config["cloud.api"] = api
with set_temporary_config(tmp_config):
retrieved_agent = _agents.get(agent_option, None)
if not retrieved_agent:
click.secho("{} is not a valid agent".format(agent_option), fg="red")
return
env_vars = dict()
for env_var in env:
k, v = env_var.split("=")
env_vars[k] = v
labels = list(set(label))
if agent_option == "local":
from_qualified_name(retrieved_agent)(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
import_paths=list(import_path),
show_flow_logs=show_flow_logs,
no_cloud_logs=no_cloud_logs,
).start()
elif agent_option == "docker":
from_qualified_name(retrieved_agent)(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
base_url=base_url,
no_pull=no_pull,
show_flow_logs=show_flow_logs,
volumes=list(volume),
network=network,
docker_interface=not no_docker_interface,
).start()
elif agent_option == "fargate":
from_qualified_name(retrieved_agent)(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
**kwargs
).start()
elif agent_option == "kubernetes":
from_qualified_name(retrieved_agent)(
agent_config_id=agent_config_id,
namespace=namespace,
job_template_path=job_template,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
).start()
else:
from_qualified_name(retrieved_agent)(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
).start()
@agent.command(hidden=True)
@click.argument("name")
@click.option(
"--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True
)
@click.option("--api", "-a", required=False, help="A Prefect API URL.", hidden=True)
@click.option(
"--namespace",
"-n",
required=False,
help="Agent namespace to launch workloads.",
hidden=True,
)
@click.option(
"--image-pull-secrets",
"-i",
required=False,
help="Name of image pull secrets to use for workloads.",
hidden=True,
)
@click.option(
"--resource-manager", is_flag=True, help="Enable resource manager.", hidden=True
)
@click.option("--rbac", is_flag=True, help="Enable default RBAC.", hidden=True)
@click.option(
"--latest", is_flag=True, help="Use the latest Prefect image.", hidden=True
)
@click.option(
"--mem-request",
required=False,
help="Requested memory for Prefect init job.",
hidden=True,
)
@click.option(
"--mem-limit",
required=False,
help="Limit memory for Prefect init job.",
hidden=True,
)
@click.option(
"--cpu-request",
required=False,
help="Requested CPU for Prefect init job.",
hidden=True,
)
@click.option(
"--cpu-limit", required=False, help="Limit CPU for Prefect init job.", hidden=True
)
@click.option(
"--image-pull-policy",
required=False,
help="imagePullPolicy for Prefect init job",
hidden=True,
)
@click.option(
"--service-account-name",
required=False,
help="Name of Service Account for Prefect init job",
hidden=True,
)
@click.option(
"--label",
"-l",
multiple=True,
help="Labels the agent will use to query for flow runs.",
hidden=True,
)
@click.option(
"--env",
"-e",
multiple=True,
help="Environment variables to set on each submitted flow run.",
hidden=True,
)
@click.option(
"--import-path",
"-p",
multiple=True,
help="Import paths the local agent will add to all flow runs.",
hidden=True,
)
@click.option(
"--show-flow-logs",
"-f",
help="Display logging output from flows run by the agent.",
hidden=True,
is_flag=True,
)
@click.option(
"--backend",
"-b",
required=False,
help="Prefect backend to use for this agent.",
hidden=True,
)
def install(
name,
token,
api,
namespace,
image_pull_secrets,
resource_manager,
rbac,
latest,
mem_request,
mem_limit,
cpu_request,
cpu_limit,
image_pull_policy,
service_account_name,
label,
env,
import_path,
show_flow_logs,
backend,
):
"""
Install an agent. Outputs configuration text which can be used to install on various
platforms. The Prefect image version will default to your local `prefect.__version__`
\b
Arguments:
name TEXT The name of an agent to install (e.g.
`kubernetes`, `local`)
\b
Options:
--token, -t TEXT A Prefect Cloud API token
--label, -l TEXT Labels the agent will use to query for flow runs
Multiple values supported.
e.g. `-l label1 -l label2`
--env, -e TEXT Environment variables to set on each submitted
flow run. Note that equal signs in environment
variable values are not currently supported from
the CLI. Multiple values supported.
e.g. `-e AUTH=token -e PKG_SETTING=true`
\b
Kubernetes Agent:
--api, -a TEXT A Prefect API URL
--namespace, -n TEXT Agent namespace to launch workloads
--image-pull-secrets, -i TEXT Name of image pull secrets to use for workloads
--resource-manager Enable resource manager on install
--rbac Enable default RBAC on install
--latest Use the `latest` Prefect image
--mem-request TEXT Requested memory for Prefect init job
--mem-limit TEXT Limit memory for Prefect init job
--cpu-request TEXT Requested CPU for Prefect init job
--cpu-limit TEXT Limit CPU for Prefect init job
--image-pull-policy TEXT imagePullPolicy for Prefect init job
--service-account-name TEXT Name of Service Account for Prefect init job
        --backend TEXT              Prefect backend to use for this agent
Defaults to the backend currently set in config.
\b
Local Agent:
--import-path, -p TEXT Absolute import paths to provide to the local
agent. Multiple values supported.
e.g. `-p /root/my_scripts -p /utilities`
--show-flow-logs, -f Display logging output from flows run by the
agent
"""
supported_agents = {
"kubernetes": "prefect.agent.kubernetes.KubernetesAgent",
"local": "prefect.agent.local.LocalAgent",
}
retrieved_agent = supported_agents.get(name, None)
if not retrieved_agent:
click.secho("{} is not a supported agent for `install`".format(name), fg="red")
return
env_vars = dict()
for env_var in env:
k, v = env_var.split("=")
env_vars[k] = v
labels = list(set(label))
if name == "kubernetes":
deployment = from_qualified_name(retrieved_agent).generate_deployment_yaml(
token=token,
api=api,
namespace=namespace,
image_pull_secrets=image_pull_secrets,
resource_manager_enabled=resource_manager,
rbac=rbac,
latest=latest,
mem_request=mem_request,
mem_limit=mem_limit,
cpu_request=cpu_request,
cpu_limit=cpu_limit,
image_pull_policy=image_pull_policy,
service_account_name=service_account_name,
labels=labels,
env_vars=env_vars,
backend=backend,
)
click.echo(deployment)
elif name == "local":
conf = from_qualified_name(retrieved_agent).generate_supervisor_conf(
token=token,
labels=labels,
import_paths=list(import_path),
show_flow_logs=show_flow_logs,
)
click.echo(conf)
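# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): exercising the click group
# in-process with click's test runner, e.g. to inspect the generated help text
# without starting a real agent.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(agent, ["start", "--help"])
    print(result.output)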
| 32.847744
| 99
| 0.559371
|
8b523e634dcf4e0452769ab00883b2fd2bac5c29
| 16,287
|
py
|
Python
|
mmdet3d/datasets/scannet_dataset.py
|
xiangruhuang/mmdetection3d
|
e669263a60a361532a077f655721a885f8ac6280
|
[
"Apache-2.0"
] | 29
|
2021-09-29T13:31:12.000Z
|
2022-03-15T13:31:25.000Z
|
mmdet3d/datasets/scannet_dataset.py
|
xiangruhuang/mmdetection3d
|
e669263a60a361532a077f655721a885f8ac6280
|
[
"Apache-2.0"
] | 3
|
2021-12-13T01:21:12.000Z
|
2022-02-24T01:46:14.000Z
|
mmdet3d/datasets/scannet_dataset.py
|
xiangruhuang/mmdetection3d
|
e669263a60a361532a077f655721a885f8ac6280
|
[
"Apache-2.0"
] | 1
|
2021-12-03T08:39:18.000Z
|
2021-12-03T08:39:18.000Z
|
import numpy as np
import tempfile
import warnings
from os import path as osp
from mmdet3d.core import show_result, show_seg_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from mmseg.datasets import DATASETS as SEG_DATASETS
from .custom_3d import Custom3DDataset
from .custom_3d_seg import Custom3DSegDataset
from .pipelines import Compose
@DATASETS.register_module()
class ScanNetDataset(Custom3DDataset):
r"""ScanNet Dataset for Detection Task.
This class serves as the API for experiments on the ScanNet Dataset.
Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_
for data downloading.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            in its original format and then convert it to `box_type_3d`.
            Defaults to 'Depth' in this dataset. Available options include
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=None,
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- pts_instance_mask_path (str): Path of instance masks.
- pts_semantic_mask_path (str): Path of semantic masks.
- axis_align_matrix (np.ndarray): Transformation matrix for \
global scene alignment.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32) # k, 6
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
# to target box structure
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
with_yaw=False,
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
pts_instance_mask_path = osp.join(self.data_root,
info['pts_instance_mask_path'])
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
axis_align_matrix = self._get_axis_align_matrix(info)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
pts_instance_mask_path=pts_instance_mask_path,
pts_semantic_mask_path=pts_semantic_mask_path,
axis_align_matrix=axis_align_matrix)
return anns_results
def prepare_test_data(self, index):
"""Prepare data for testing.
We should take axis_align_matrix from self.data_infos since we need \
to align point clouds.
Args:
index (int): Index for accessing the target data.
Returns:
dict: Testing data dict of the corresponding index.
"""
input_dict = self.get_data_info(index)
# take the axis_align_matrix from data_infos
input_dict['ann_info'] = dict(
axis_align_matrix=self._get_axis_align_matrix(
self.data_infos[index]))
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
return example
@staticmethod
def _get_axis_align_matrix(info):
"""Get axis_align_matrix from info. If not exist, return identity mat.
Args:
info (dict): one data info term.
Returns:
np.ndarray: 4x4 transformation matrix.
"""
if 'axis_align_matrix' in info['annos'].keys():
return info['annos']['axis_align_matrix'].astype(np.float32)
else:
warnings.warn(
'axis_align_matrix is not found in ScanNet data info, please '
'use new pre-process scripts to re-generate ScanNet data')
return np.eye(4).astype(np.float32)
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2]),
dict(type='GlobalAlignment', rotation_axis=2),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
pred_bboxes = result['boxes_3d'].tensor.numpy()
show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name,
show)
@DATASETS.register_module()
@SEG_DATASETS.register_module()
class ScanNetSegDataset(Custom3DSegDataset):
r"""ScanNet Dataset for Semantic Segmentation Task.
This class serves as the API for experiments on the ScanNet Dataset.
Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_
for data downloading.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
palette (list[list[int]], optional): The palette of segmentation map.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
scene_idxs (np.ndarray | str, optional): Precomputed index to load
data. For scenes with many points, we may sample it several times.
Defaults to None.
label_weight (np.ndarray | str, optional): Precomputed weight to \
balance loss calculation. If None is given, compute from data.
Defaults to None.
"""
CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',
'bathtub', 'otherfurniture')
VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,
33, 34, 36, 39)
ALL_CLASS_IDS = tuple(range(41))
PALETTE = [
[174, 199, 232],
[152, 223, 138],
[31, 119, 180],
[255, 187, 120],
[188, 189, 34],
[140, 86, 75],
[255, 152, 150],
[214, 39, 40],
[197, 176, 213],
[148, 103, 189],
[196, 156, 148],
[23, 190, 207],
[247, 182, 210],
[219, 219, 141],
[255, 127, 14],
[158, 218, 229],
[44, 160, 44],
[112, 128, 144],
[227, 119, 194],
[82, 84, 163],
]
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None,
label_weight=None):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs,
label_weight=label_weight)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=self.VALID_CLASS_IDS,
max_cat_id=np.max(self.ALL_CLASS_IDS)),
dict(
type='DefaultFormatBundle3D',
with_label=False,
class_names=self.CLASSES),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, gt_sem_mask = self._extract_data(
i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)
points = points.numpy()
pred_sem_mask = result['semantic_mask'].numpy()
show_seg_result(points, gt_sem_mask,
pred_sem_mask, out_dir, file_name,
np.array(self.PALETTE), self.ignore_index, show)
def get_scene_idxs_and_label_weight(self, scene_idxs, label_weight):
"""Compute scene_idxs for data sampling and label weight for loss \
calculation.
We sample more times for scenes with more points. Label_weight is
inversely proportional to number of class points.
"""
# when testing, we load one whole scene every time
# and we don't need label weight for loss calculation
if not self.test_mode and scene_idxs is None:
raise NotImplementedError(
'please provide re-sampled scene indexes for training')
return super().get_scene_idxs_and_label_weight(scene_idxs,
label_weight)
def format_results(self, results, txtfile_prefix=None):
r"""Format the results to txt file. Refer to `ScanNet documentation
<http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.
Args:
outputs (list[dict]): Testing results of the dataset.
txtfile_prefix (str | None): The prefix of saved files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (outputs, tmp_dir), outputs is the detection results,
tmp_dir is the temporal directory created for saving submission
files when ``submission_prefix`` is not specified.
"""
import mmcv
if txtfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
txtfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
mmcv.mkdir_or_exist(txtfile_prefix)
# need to map network output to original label idx
pred2label = np.zeros(len(self.VALID_CLASS_IDS)).astype(np.int)
for original_label, output_idx in self.label_map.items():
if output_idx != self.ignore_index:
pred2label[output_idx] = original_label
outputs = []
for i, result in enumerate(results):
info = self.data_infos[i]
sample_idx = info['point_cloud']['lidar_idx']
pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)
pred_label = pred2label[pred_sem_mask]
curr_file = f'{txtfile_prefix}/{sample_idx}.txt'
np.savetxt(curr_file, pred_label, fmt='%d')
outputs.append(dict(seg_mask=pred_label))
return outputs, tmp_dir
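# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of building the detection dataset and reading
# one annotation record; the data_root/ann_file paths are assumptions and must
# point at files produced by the ScanNet pre-processing scripts.
def _example_scannet_usage(data_root='./data/scannet/',
                           ann_file='./data/scannet/scannet_infos_train.pkl'):
    dataset = ScanNetDataset(
        data_root=data_root,
        ann_file=ann_file,
        pipeline=None,
        box_type_3d='Depth')
    ann = dataset.get_ann_info(0)
    # gt_bboxes_3d is a DepthInstance3DBoxes, gt_labels_3d a numpy array
    return ann['gt_bboxes_3d'], ann['gt_labels_3d']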
| 38.964115
| 79
| 0.583349
|
7f543ec75b04ad1d3d1f0c61f140ba71ca68b433
| 1,034
|
py
|
Python
|
expert_python/src/thread_semaphore.py
|
MaiXiaochai/Droplet
|
6d7fed9ca76678768a3752fa8df86a021acc3509
|
[
"MIT"
] | null | null | null |
expert_python/src/thread_semaphore.py
|
MaiXiaochai/Droplet
|
6d7fed9ca76678768a3752fa8df86a021acc3509
|
[
"MIT"
] | null | null | null |
expert_python/src/thread_semaphore.py
|
MaiXiaochai/Droplet
|
6d7fed9ca76678768a3752fa8df86a021acc3509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @File : thread_semaphore.py
# @Time : 2019/2/20 23:13
# @Author : MaiXiaochai
# @Site : https://github.com/MaiXiaochai
# semaphore
import threading
import time
class HtmlSpider(threading.Thread):
def __init__(self, url, sem):
super().__init__()
self.url = url
self.sem = sem
def run(self):
time.sleep(2)
print("got html text success")
self.sem.release()
class UrlProducer(threading.Thread):
def __init__(self, sem):
super().__init__()
self.sem = sem
def run(self):
for i in range(20):
            # Each call to acquire() decreases the semaphore's internal counter by 1
            # acquire() and release() are used in pairs
            # To be accurate, this release() should only happen after the spider's main work has finished
self.sem.acquire()
html_thread = HtmlSpider("https://baidu.com/{}".format(i), self.sem)
html_thread.start()
if __name__ == "__main__":
sem = threading.Semaphore(3)
url_producer = UrlProducer(sem)
url_producer.start()
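# --- Variation sketch (not part of the original example) ---
# The same throttling can also be written with the semaphore as a context
# manager, which releases automatically even if the worker raises. Note that
# this version starts every thread immediately and only limits how many run
# their work at once, unlike the producer-side acquire() above. fetch_url() is
# a hypothetical helper and is not called anywhere.
def fetch_url(url, sem):
    def worker():
        with sem:
            time.sleep(2)
            print("got html text success for", url)
    t = threading.Thread(target=worker)
    t.start()
    return t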
| 22.478261
| 80
| 0.598646
|
962f0e92a5b4c6111541be21f45fc0413760c2f3
| 577
|
py
|
Python
|
repos/system_upgrade/common/actors/verifycheckresults/actor.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 21
|
2018-11-20T15:58:39.000Z
|
2022-03-15T19:57:24.000Z
|
repos/system_upgrade/common/actors/verifycheckresults/actor.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 732
|
2018-11-21T18:33:26.000Z
|
2022-03-31T16:16:24.000Z
|
repos/system_upgrade/common/actors/verifycheckresults/actor.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 85
|
2018-11-20T17:55:00.000Z
|
2022-03-29T09:40:31.000Z
|
from leapp.actors import Actor
from leapp.reporting import Report
from leapp.libraries.actor.verifycheckresults import check
from leapp.tags import ReportPhaseTag, IPUWorkflowTag
class VerifyCheckResults(Actor):
"""
Check all generated results messages and notify user about them.
A report file containing all messages will be generated, together with log messages displayed
to the user.
"""
name = 'verify_check_results'
consumes = (Report,)
produces = ()
tags = (ReportPhaseTag, IPUWorkflowTag)
def process(self):
check()
| 26.227273
| 97
| 0.731369
|
630fb211536c757cf938839d1098e5eab5a376be
| 49,472
|
py
|
Python
|
wbia/control/controller_inject.py
|
WildMeOrg/wildbook-ia
|
a18d57611e5936bea02a964716466e062415aa1a
|
[
"Apache-2.0"
] | 20
|
2021-01-19T23:17:21.000Z
|
2022-03-21T10:25:56.000Z
|
wbia/control/controller_inject.py
|
solomonkimunyu/wildbook-ia
|
ac433d4f2a47b1d905c421a36c497f787003afc3
|
[
"Apache-2.0"
] | 16
|
2021-01-28T23:05:29.000Z
|
2022-03-31T20:39:36.000Z
|
wbia/control/controller_inject.py
|
solomonkimunyu/wildbook-ia
|
ac433d4f2a47b1d905c421a36c497f787003afc3
|
[
"Apache-2.0"
] | 9
|
2021-02-13T20:19:46.000Z
|
2022-03-29T10:47:11.000Z
|
# -*- coding: utf-8 -*-
"""
TODO:
Move flask registering into another file.
Should also make the actual flask registration lazy.
It should only be executed if a web instance is being started.
python -c "import wbia"
"""
import logging
import utool as ut
import sys
from wbia import dtool
from datetime import timedelta
from functools import update_wrapper
import warnings
from functools import wraps
from os.path import abspath, join, dirname
# import simplejson as json
# import json
# import pickle
import traceback
from hashlib import sha1
import os
# import numpy as np
import hmac
from wbia import constants as const
import string
import random
import base64
# <flask>
# TODO: allow optional flask import
try:
import flask
from flask import session, request
HAS_FLASK = True
except Exception:
HAS_FLASK = False
msg = 'Missing flask and/or Flask-session.\n' 'pip install Flask'
warnings.warn(msg)
if ut.STRICT:
raise
try:
# from flask.ext.cors import CORS
from flask_cors import CORS
HAS_FLASK_CORS = True
except Exception:
HAS_FLASK_CORS = False
warnings.warn('Missing flask.ext.cors')
if ut.SUPER_STRICT:
raise
try:
from flask_cas import CAS # NOQA
from flask_cas import login_required as login_required_cas
# from flask.ext.cas import CAS
# from flask.ext.cas import login_required
# HAS_FLASK_CAS = True
HAS_FLASK_CAS = False
except Exception:
HAS_FLASK_CAS = False
login_required_cas = ut.identity
msg = (
'Missing flask.ext.cas.\n'
'To install try pip install git+https://github.com/cameronbwhite/Flask-CAS.git'
)
warnings.warn(msg)
# sudo
print('')
if ut.SUPER_STRICT:
raise
# </flask>
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# INJECTED_MODULES = []
UTOOL_AUTOGEN_SPHINX_RUNNING = not (
os.environ.get('UTOOL_AUTOGEN_SPHINX_RUNNING', 'OFF') == 'OFF'
)
GLOBAL_APP_ENABLED = (
not UTOOL_AUTOGEN_SPHINX_RUNNING and not ut.get_argflag('--no-flask') and HAS_FLASK
)
GLOBAL_APP_NAME = 'IBEIS'
GLOBAL_APP_SECRET = os.urandom(64)
GLOBAL_APP = None
GLOBAL_CORS = None
GLOBAL_CAS = None
REMOTE_PROXY_URL = None
REMOTE_PROXY_PORT = 5001
WEB_DEBUG_INCLUDE_TRACE = True
CONTROLLER_CLASSNAME = 'IBEISController'
MICROSOFT_API_ENABLED = ut.get_argflag('--web') and ut.get_argflag(
'--microsoft'
) # True == Microsoft Deployment (i.e., only allow MICROSOFT_API_PREFIX prefix below)
MICROSOFT_API_PREFIX = '/v0.1/wildbook/'
MICROSOFT_API_DEBUG = True
if MICROSOFT_API_ENABLED:
WEB_DEBUG_INCLUDE_TRACE = MICROSOFT_API_DEBUG
STRICT_VERSION_API = (
False # True == Microsoft Deployment (i.e., only allow /wildme/v0.1/ prefixes)
)
def get_flask_app(templates_auto_reload=True):
# TODO this should be initialized explicity in entry_points.py only if needed
global GLOBAL_APP
global GLOBAL_CORS
global GLOBAL_CAS
global HAS_FLASK
if not HAS_FLASK:
logger.info('flask is not installed')
return None
if GLOBAL_APP is None:
if hasattr(sys, '_MEIPASS'):
# hack for pyinstaller directory
root_dpath = sys._MEIPASS
else:
root_dpath = abspath(dirname(dirname(__file__)))
        template_dpath = join(root_dpath, 'web', 'templates')
        static_dpath = join(root_dpath, 'web', 'static')
        if ut.VERBOSE:
            logger.info('[get_flask_app] root_dpath = %r' % (root_dpath,))
            logger.info('[get_flask_app] template_dpath = %r' % (template_dpath,))
            logger.info('[get_flask_app] static_dpath = %r' % (static_dpath,))
            logger.info('[get_flask_app] GLOBAL_APP_NAME = %r' % (GLOBAL_APP_NAME,))
        GLOBAL_APP = flask.Flask(
            GLOBAL_APP_NAME, template_folder=template_dpath, static_folder=static_dpath
)
if ut.VERBOSE:
logger.info(
'[get_flask_app] USING FLASK SECRET KEY: %r' % (GLOBAL_APP_SECRET,)
)
GLOBAL_APP.secret_key = GLOBAL_APP_SECRET
if templates_auto_reload:
GLOBAL_APP.config['TEMPLATES_AUTO_RELOAD'] = True
GLOBAL_APP.QUERY_OBJECT = None
GLOBAL_APP.QUERY_OBJECT_JOBID = None
GLOBAL_APP.QUERY_OBJECT_FEEDBACK_BUFFER = []
GLOBAL_APP.GRAPH_CLIENT_DICT = {}
if HAS_FLASK_CORS:
GLOBAL_CORS = CORS(
GLOBAL_APP, resources={r'/api/*': {'origins': '*'}}
) # NOQA
# if HAS_FLASK_CAS:
# GLOBAL_CAS = CAS(GLOBAL_APP, '/cas')
# GLOBAL_APP.config['SESSION_TYPE'] = 'memcached'
# GLOBAL_APP.config['SECRET_KEY'] = GLOBAL_APP_SECRET
# GLOBAL_APP.config['CAS_SERVER'] = 'https://cas-auth.rpi.edu'
# GLOBAL_APP.config['CAS_AFTER_LOGIN'] = 'root'
return GLOBAL_APP
# try and load flask
try:
if GLOBAL_APP_ENABLED:
get_flask_app()
except AttributeError:
logger.info('Warning flask is broken in python-3.4.0')
GLOBAL_APP_ENABLED = False
HAS_FLASK = False
class WebException(ut.NiceRepr, Exception):
def __init__(self, message, rawreturn=None, code=400):
self.code = code
self.message = message
self.rawreturn = rawreturn
from wbia.web.app import PROMETHEUS
if PROMETHEUS:
ibs = flask.current_app.ibs
tag = '%s' % (self.code,)
ibs.prometheus_increment_exception(tag)
def get_rawreturn(self, debug_stack_trace=False):
if self.rawreturn is None:
if debug_stack_trace:
return str(traceback.format_exc())
else:
return None
else:
return self.rawreturn
def __nice__(self):
return '(%r: %r)' % (self.code, self.message)
class WebMissingUUIDException(WebException):
def __init__(self, missing_image_uuid_list=[], missing_annot_uuid_list=[]):
args = (
len(missing_image_uuid_list),
len(missing_annot_uuid_list),
)
message = 'Missing image and/or annotation UUIDs (%d, %d)' % args
rawreturn = {
'missing_image_uuid_list': missing_image_uuid_list,
'missing_annot_uuid_list': missing_annot_uuid_list,
}
code = 600
super(WebMissingUUIDException, self).__init__(message, rawreturn, code)
class WebDuplicateUUIDException(WebException):
def __init__(self, qdup_pos_map={}, ddup_pos_map={}):
message = (
'Some UUIDs are specified more than once at positions:\n'
'duplicate_database_uuids=%s\n'
'duplicate_query_uuids=%s\n'
) % (ut.repr3(qdup_pos_map, nl=1), ut.repr3(ddup_pos_map, nl=1))
qdup_pos_map_ = {str(k): v for k, v in qdup_pos_map.items()}
ddup_pos_map_ = {str(k): v for k, v in ddup_pos_map.items()}
rawreturn = {
'qdup_pos_map': qdup_pos_map_,
'ddup_pos_map': ddup_pos_map_,
}
code = 601
super(WebDuplicateUUIDException, self).__init__(message, rawreturn, code)
class WebUnknownUUIDException(WebException):
def __init__(self, unknown_uuid_type_list, unknown_uuid_list):
uuid_type_str = ', '.join(sorted(set(unknown_uuid_type_list)))
args = (
uuid_type_str,
len(unknown_uuid_list),
)
message = 'Unknown %s UUIDs (%d)' % args
rawreturn = {
'unknown_uuid_type_list': unknown_uuid_type_list,
'unknown_uuid_list': unknown_uuid_list,
}
code = 602
super(WebUnknownUUIDException, self).__init__(message, rawreturn, code)
class WebReviewNotReadyException(WebException):
def __init__(self, query_uuid):
args = (query_uuid,)
message = 'The query_uuid %r is not yet ready for review' % args
rawreturn = {
'query_uuid': query_uuid,
}
code = 603
super(WebReviewNotReadyException, self).__init__(message, rawreturn, code)
class WebUnavailableUUIDException(WebException):
def __init__(self, unavailable_annot_uuid_list, query_uuid):
self.query_uuid = query_uuid
args = (query_uuid,)
message = (
'A running query %s is using (at least one of) the requested annotations. Filter out these annotations from the new query or stop the previous query.'
% args
)
rawreturn = {
'unavailable_annot_uuid_list': unavailable_annot_uuid_list,
'query_uuid': query_uuid,
}
code = 604
super(WebUnavailableUUIDException, self).__init__(message, rawreturn, code)
class WebReviewFinishedException(WebException):
def __init__(self, query_uuid):
args = (query_uuid,)
message = 'The query_uuid %r has nothing more to review' % args
rawreturn = {
'query_uuid': query_uuid,
}
code = 605
super(WebReviewFinishedException, self).__init__(message, rawreturn, code)
class WebMultipleNamedDuplicateException(WebException):
def __init__(self, bad_dict):
message = (
            'Duplicate UUIDs are specified with more than one name:\n'
'bad_database_uuids=%s\n'
) % (
ut.repr3(bad_dict, nl=1),
)
bad_dict = {str(k): v for k, v in bad_dict.items()}
rawreturn = {
'bad_dict': bad_dict,
}
code = 606
super(WebMultipleNamedDuplicateException, self).__init__(message, rawreturn, code)
class WebMatchThumbException(WebException):
def __init__(self, reference, qannot_uuid, dannot_uuid, version, message):
rawreturn = {
'reference': reference,
'qannot_uuid': qannot_uuid,
'dannot_uuid': dannot_uuid,
'version': version,
}
code = 607
super(WebMatchThumbException, self).__init__(message, rawreturn, code)
class WebInvalidUUIDException(WebException):
def __init__(self, invalid_image_uuid_list=[], invalid_annot_uuid_list=[]):
args = (
len(invalid_image_uuid_list),
len(invalid_annot_uuid_list),
)
message = 'Invalid image and/or annotation UUIDs (%d, %d)' % args
rawreturn = {
'invalid_image_uuid_list': invalid_image_uuid_list,
'invalid_annot_uuid_list': invalid_annot_uuid_list,
}
code = 608
super(WebInvalidUUIDException, self).__init__(message, rawreturn, code)
class WebInvalidMatchException(WebException):
def __init__(self, qaid_list, daid_list):
message = 'The ID request is invalid because the daid_list is empty (after filtering out the qaid_list)'
rawreturn = {
'qaid_list': qaid_list,
'daid_list': daid_list,
}
code = 609
super(WebInvalidMatchException, self).__init__(message, rawreturn, code)
class WebMissingInput(WebException):
def __init__(self, message, key=None):
rawreturn = {}
if key is not None:
rawreturn['parameter'] = key
if message is not None:
rawreturn['message'] = message
code = 400
super(WebMissingInput, self).__init__(message, rawreturn, code)
class WebInvalidInput(WebException):
def __init__(self, message, key=None, value=None, image=False):
rawreturn = {}
if key is not None:
rawreturn['parameter'] = key
if value is not None:
rawreturn['value'] = value
if message is not None:
rawreturn['message'] = message
code = 415 if image else 400
super(WebInvalidInput, self).__init__(message, rawreturn, code)
class WebRuntimeException(WebException):
def __init__(self, message):
rawreturn = {'message': message}
code = 500
super(WebRuntimeException, self).__init__(message, rawreturn, code)
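# --- Usage sketch (hypothetical, not part of the original module) ---
# API handlers signal errors by raising one of the WebException subclasses
# above from within a live web request; translate_wbia_webcall() turns them
# into the standard JSON error envelope using the exception's code. The UUID
# check below is illustrative only and is never called.
def _example_check_uuids(requested_annot_uuids, known_annot_uuids):
    missing = [u for u in requested_annot_uuids if u not in known_annot_uuids]
    if missing:
        raise WebMissingUUIDException(missing_annot_uuid_list=missing)
    return True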
def translate_wbia_webreturn(
rawreturn,
success=True,
code=None,
message=None,
jQuery_callback=None,
cache=None,
__skip_microsoft_validation__=False,
):
if MICROSOFT_API_ENABLED and not __skip_microsoft_validation__:
if rawreturn is not None:
assert isinstance(
rawreturn, dict
), 'Microsoft APIs must return a Python dictionary'
template = rawreturn
else:
if code is None:
code = ''
if message is None:
message = ''
if cache is None:
cache = -1
template = {
'status': {
'success': success,
'code': code,
'message': message,
'cache': cache,
# 'debug': {} # TODO
},
'response': rawreturn,
}
response = ut.to_json(template)
if jQuery_callback is not None and isinstance(jQuery_callback, str):
logger.info('[web] Including jQuery callback function: %r' % (jQuery_callback,))
response = '%s(%s)' % (jQuery_callback, response)
return response
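# Example of the envelope produced above (Microsoft mode disabled); shown for
# reference only, key order may differ:
#   translate_wbia_webreturn([1, 2, 3], True, 200, 'ok')
#   -> '{"status": {"success": true, "code": 200, "message": "ok", "cache": -1},
#       "response": [1, 2, 3]}'
# With a jQuery callback the same JSON is wrapped as 'callback({...})'.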
def _process_input(multidict=None):
if multidict is None:
return {}
if isinstance(multidict, dict):
from werkzeug.datastructures import ImmutableMultiDict
multidict = ImmutableMultiDict([item for item in multidict.items()])
kwargs2 = {}
for (arg, value) in multidict.lists():
if len(value) > 1:
raise WebException('Cannot specify a parameter more than once: %r' % (arg,))
# value = str(value[0])
value = value[0]
if (
',' in value
and '[' not in value
and ']' not in value
and '{' not in value
and '}' not in value
):
value = '[%s]' % (value,)
if value in ['True', 'False']:
value = value.lower()
try:
converted = ut.from_json(value)
except Exception:
# try making string and try again...
try:
value_ = '"%s"' % (value,)
converted = ut.from_json(value_)
except Exception as ex:
logger.info('FAILED TO JSON CONVERT: %s' % (ex,))
logger.info(ut.repr3(value))
converted = value
if arg.endswith('_list') and not isinstance(converted, (list, tuple)):
if isinstance(converted, str) and ',' in converted:
converted = converted.strip().split(',')
else:
converted = [converted]
# Allow JSON formatted strings to be placed into note fields
if (arg.endswith('note_list') or arg.endswith('notes_list')) and isinstance(
converted, (list, tuple)
):
type_ = type(converted)
temp_list = []
for _ in converted:
if isinstance(_, dict):
temp_list.append('%s' % (_,))
else:
temp_list.append(_)
converted = type_(temp_list)
kwargs2[arg] = converted
return kwargs2
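# Illustrative coercions performed above (parameter names are hypothetical):
#   ?gid=5                -> kwargs2['gid'] == 5
#   ?aid_list=1,2,3       -> kwargs2['aid_list'] == [1, 2, 3]
#   ?flag=True            -> kwargs2['flag'] is True (lower-cased, then JSON)
#   ?note_list=[{"a": 1}] -> dict entries in note lists are re-stringified
# Passing the same parameter more than once raises a WebException.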
def translate_wbia_webcall(func, *args, **kwargs):
r"""
Called from flask request context
Args:
func (function): live python function
Returns:
tuple: (output, True, 200, None, jQuery_callback)
Example:
>>> # xdoctest: +REQUIRES(--web-tests)
>>> from wbia.control.controller_inject import * # NOQA
>>> import wbia
>>> with wbia.opendb_with_web('testdb1') as (ibs, client):
... aids = client.get('/api/annot/').json
... failrsp = client.post('/api/annot/uuids/')
... failrsp2 = client.get('/api/query/chips/simple_dict//', data={'qaid_list': [0], 'daid_list': [0]})
... log_text = client.get('/api/query/chips/simple_dict/', data={'qaid_list': [0], 'daid_list': [0]})
>>> print('\n---\nfailrsp =\n%s' % (failrsp.data,))
>>> print('\n---\nfailrsp2 =\n%s' % (failrsp2.data,))
>>> print('Finished test')
Finished test
"""
assert len(args) == 0, 'There should not be any args=%r' % (args,)
# logger.info('Calling: %r with args: %r and kwargs: %r' % (func, args, kwargs, ))
ibs = flask.current_app.ibs
funcstr = ut.func_str(func, (ibs,) + args, kwargs=kwargs, truncate=True)
if 'heartbeat' in funcstr:
pass
elif 'metrics' in funcstr:
pass
else:
logger.info('[TRANSLATE] Calling: %s' % (funcstr,))
try:
key_list = sorted(list(kwargs.keys()))
type_list = []
message_list = []
for key in key_list:
try:
values = kwargs[key]
type_ = type(values).__name__
if type_ == 'list':
if len(values) == 0:
type_ = 'empty list'
message_ = '[]'
else:
value = values[0]
type_ += ' of ' + type(value).__name__
length1 = len(values)
try:
length2 = len(set(values))
except TypeError:
length2 = len(set(map(str, values)))
length3 = min(length1, 3)
mod = '...' if length1 != length3 else ''
message_ = 'length %d with unique %d of %s%s' % (
length1,
length2,
values[:length3],
mod,
)
else:
message_ = '%s' % (values,)
except Exception:
type_ = 'UNKNOWN'
message_ = 'ERROR IN PARSING'
type_list.append(type_)
message_list.append(message_)
zipped = list(zip(key_list, type_list, message_list))
if len(zipped) > 0:
length1 = max(list(map(len, key_list)))
length2 = max(list(map(len, type_list)))
for key_, type_, message_ in zipped:
key_ = key_.rjust(length1)
type_ = type_.ljust(length2)
try:
logger.info('[TRANSLATE] \t %s (%s) : %s' % (key_, type_, message_))
except UnicodeEncodeError:
logger.info('[TRANSLATE] \t %s (%s) : UNICODE ERROR')
except Exception:
logger.info('[TRANSLATE] ERROR IN KWARGS PARSING')
try:
# TODO, have better way to differentiate ibs funcs from other funcs
output = func(**kwargs)
except TypeError:
try:
output = func(ibs, **kwargs)
except WebException:
raise
except Exception as ex2: # NOQA
if MICROSOFT_API_ENABLED:
if isinstance(ex2, TypeError) and 'required positional' in str(ex2):
parameter = str(ex2).split(':')[1].strip().strip("'")
raise WebMissingInput('Missing required parameter', parameter)
elif isinstance(ex2, WebException):
raise
else:
raise WebRuntimeException(
'An unknown error has occurred, please contact the API administrator at dev@wildme.org.'
)
else:
msg_list = []
# msg_list.append('Error in translate_wbia_webcall')
msg_list.append('Expected Function Definition: ' + ut.func_defsig(func))
msg_list.append('Received Function Definition: %s' % (funcstr,))
msg_list.append('Received Function Parameters:')
for key in kwargs:
value = kwargs[key]
value_str = '%r' % (value,)
value_str = ut.truncate_str(value_str, maxlen=256)
msg_list.append('\t%r: %s' % (key, value_str))
# msg_list.append('\targs = %r' % (args,))
# msg_list.append('flask.request.args = %r' % (flask.request.args,))
# msg_list.append('flask.request.form = %r' % (flask.request.form,))
msg_list.append('%s: %s' % (type(ex2).__name__, ex2))
if WEB_DEBUG_INCLUDE_TRACE:
trace = str(traceback.format_exc())
msg_list.append(trace)
msg = '\n'.join(msg_list)
logger.info(msg)
# error_msg = ut.formatex(ex2, msg, tb=True)
# logger.info(error_msg)
# error_msg = ut.strip_ansi(error_msg)
# raise Exception(error_msg)
raise Exception(msg)
# raise
resp_tup = (output, True, 200, None)
return resp_tup
def authentication_challenge():
"""Sends a 401 response that enables basic auth."""
rawreturn = ''
success = False
code = 401
message = 'Could not verify your authentication, login with proper credentials.'
jQuery_callback = None
webreturn = translate_wbia_webreturn(
rawreturn, success, code, message, jQuery_callback
)
response = flask.make_response(webreturn, code)
response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
return response
def authentication_user_validate():
"""
This function is called to check if a username /
password combination is valid.
"""
auth = flask.request.authorization
if auth is None:
return False
username = auth.username
password = auth.password
return username == 'wbia' and password == 'wbia'
def authentication_user_only(func):
@wraps(func)
def wrp_authenticate_user(*args, **kwargs):
if not authentication_user_validate():
return authentication_challenge()
return func(*args, **kwargs)
# wrp_authenticate_user = ut.preserve_sig(wrp_authenticate_user, func)
return wrp_authenticate_user
def create_key():
hyphen_list = [8, 13, 18, 23]
key_list = [
'-' if _ in hyphen_list else random.choice(string.hexdigits) for _ in range(36)
]
return ''.join(key_list).upper()
def get_signature(key, message):
def encode(x):
if not isinstance(x, bytes):
x = bytes(x, 'utf-8')
return x
def decode(x):
return x.decode('utf-8')
key = encode(key)
message = encode(message)
signature = hmac.new(key, message, sha1)
signature = signature.digest()
signature = base64.b64encode(signature)
signature = decode(signature)
signature = str(signature)
signature = signature.strip()
return signature
def get_url_authorization(url):
hash_ = get_signature(GLOBAL_APP_SECRET, url)
hash_challenge = '%s:%s' % (GLOBAL_APP_NAME, hash_)
return hash_challenge
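# --- Client-side sketch (hypothetical) ---
# A remote caller has to send the same HMAC-SHA1 signature in the
# 'Authorization' header that get_url_authorization() computes here. Because
# GLOBAL_APP_SECRET is regenerated from os.urandom() on every start, a real
# deployment would need both sides to agree on a persistent shared secret.
def _example_sign_request(url, shared_secret, app_name=GLOBAL_APP_NAME):
    signature = get_signature(shared_secret, url)
    return {'Authorization': '%s:%s' % (app_name, signature)}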
def authentication_hash_validate():
"""
This function is called to check if a username /
password combination is valid.
"""
def last_occurence_delete(string, character):
index = string.rfind(character)
if index is None or index < 0:
return string
return string[:index] + string[index + 1 :]
hash_response = str(flask.request.headers.get('Authorization', ''))
if len(hash_response) == 0:
return False
hash_challenge_list = []
# Check normal url
url = str(flask.request.url)
hash_challenge = get_url_authorization(url)
hash_challenge_list.append(hash_challenge)
# If hash at the end of the url, try alternate hash as well
url = last_occurence_delete(url, '/')
hash_challenge = get_url_authorization(url)
hash_challenge_list.append(hash_challenge)
if '?' in url:
        url = url.replace('?', '/?')
hash_challenge = get_url_authorization(url)
hash_challenge_list.append(hash_challenge)
return hash_response in hash_challenge_list
def authentication_hash_only(func):
@wraps(func)
def wrp_authentication_hash(*args, **kwargs):
if not authentication_hash_validate():
return authentication_challenge()
return func(*args, **kwargs)
return wrp_authentication_hash
def authentication_either(func):
"""authenticated by either hash or user"""
@wraps(func)
def wrp_authentication_either(*args, **kwargs):
if not (authentication_hash_validate() or authentication_user_validate()):
return authentication_challenge()
return func(*args, **kwargs)
return wrp_authentication_either
def crossdomain(
origin=None,
methods=None,
headers=None,
max_age=21600,
attach_to_all=True,
automatic_options=True,
):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = flask.current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
logger.info(origin)
logger.info(flask.request.method)
if automatic_options and flask.request.method == 'OPTIONS':
resp = flask.current_app.make_default_options_response()
else:
resp = flask.make_response(f(*args, **kwargs))
if not attach_to_all and flask.request.method != 'OPTIONS':
return resp
h = resp.headers
logger.info(origin)
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Origin'] = '*'
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def remote_api_wrapper(func):
def remote_api_call(ibs, *args, **kwargs):
if REMOTE_PROXY_URL is None:
return func(ibs, *args, **kwargs)
else:
co_varnames = func.func_code.co_varnames
if co_varnames[0] == 'ibs':
co_varnames = tuple(co_varnames[1:])
kwargs_ = dict(zip(co_varnames, args))
kwargs.update(kwargs_)
kwargs.pop('ibs', None)
return api_remote_wbia(REMOTE_PROXY_URL, func, REMOTE_PROXY_PORT, **kwargs)
remote_api_call = ut.preserve_sig(remote_api_call, func)
return remote_api_call
API_SEEN_SET = set([])
def get_wbia_flask_api(__name__, DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE=False):
"""For function calls that resolve to api calls and return json."""
if __name__ == '__main__':
return ut.dummy_args_decor
if GLOBAL_APP_ENABLED:
def register_api(
rule, __api_plural_check__=True, __api_microsoft_check__=True, **options
):
global API_SEEN_SET
assert rule.endswith('/'), 'An API should always end in a forward-slash'
assert (
'methods' in options
), 'An api should always have a specified methods list'
rule_ = rule + ':'.join(options['methods'])
if rule_ in API_SEEN_SET:
msg = 'An API rule (%s) has been duplicated' % (rule_,)
warnings.warn(msg + '. Ignoring duplicate (may break web)')
return ut.identity
# raise AssertionError(msg)
API_SEEN_SET.add(rule_)
if MICROSOFT_API_ENABLED and __api_microsoft_check__:
if not rule.startswith(MICROSOFT_API_PREFIX):
                # msg = 'API rule=%r does not adhere to the Microsoft format, ignoring.' % (rule_, )
# warnings.warn(msg)
return ut.identity
else:
logger.info('Registering API rule=%r' % (rule_,))
try:
if not MICROSOFT_API_ENABLED:
if not rule.startswith('/v0.1/wildbook/'):
assert (
'annotation' not in rule
), 'An API rule should use "annot" instead of annotation(s)"'
assert (
'imgset' not in rule
), 'An API should use "imageset" instead of imgset(s)"'
assert '_' not in rule, 'An API should never contain an underscore'
assert '-' not in rule, 'An API should never contain a hyphen'
if __api_plural_check__:
assert 's/' not in rule, 'Use singular (non-plural) URL routes'
check_list = [
'annotgroup',
'autogen',
'chip',
'config',
# 'contributor',
'gar',
'metadata',
]
for check in check_list:
assert '/api/%s/' % (check,) not in rule, 'failed check=%r' % (check,)
except Exception:
iswarning = not ut.SUPER_STRICT
ut.printex(
'CONSIDER RENAMING API RULE: %r' % (rule,),
iswarning=iswarning,
tb=True,
)
if not iswarning:
raise
            # accept args to flask.route
            def register_closure(func):
# make translation function in closure scope
# and register it with flask.
app = get_flask_app()
@app.route(rule, **options)
# @crossdomain(origin='*')
# @authentication_either
@wraps(func)
# def translated_call(*args, **kwargs):
def translated_call(**kwargs):
def html_newlines(text):
r = '<br />\n'
text = text.replace(' ', ' ')
text = (
text.replace('\r\n', r)
.replace('\n\r', r)
.replace('\r', r)
.replace('\n', r)
)
return text
__format__ = False # Default __format__ value
ignore_cookie_set = False
try:
# logger.info('Processing: %r with args: %r and kwargs: %r' % (func, args, kwargs, ))
# Pipe web input into Python web call
kwargs2 = _process_input(flask.request.args)
kwargs3 = _process_input(flask.request.form)
try:
# kwargs4 = _process_input(flask.request.get_json())
kwargs4 = ut.from_json(flask.request.data)
except Exception:
kwargs4 = {}
kwargs.update(kwargs2)
kwargs.update(kwargs3)
kwargs.update(kwargs4)
# Update the request object to include the final rectified inputs for possible future reference
flask.request.processed = ut.to_json(kwargs)
jQuery_callback = None
if 'callback' in kwargs and 'jQuery' in kwargs['callback']:
jQuery_callback = str(kwargs.pop('callback', None))
kwargs.pop('_', None)
# logger.info('KWARGS: %s' % (kwargs, ))
# logger.info('COOKIES: %s' % (request.cookies, ))
__format__ = request.cookies.get('__format__', None)
__format__ = kwargs.pop('__format__', __format__)
if __format__ is not None:
__format__ = str(__format__).lower()
ignore_cookie_set = __format__ in ['onetime', 'true']
__format__ = __format__ in ['true', 'enabled', 'enable']
from wbia.web.app import PROMETHEUS
if PROMETHEUS:
exclude_tag_list = [
'/api/test/heartbeat/',
'/v0.1/wildbook/status/',
'/v0.1/vulcan/status/',
]
tag = request.url_rule.rule
if tag not in exclude_tag_list:
ibs = flask.current_app.ibs
ibs.prometheus_increment_api(tag)
resp_tup = translate_wbia_webcall(func, **kwargs)
rawreturn, success, code, message = resp_tup
except WebException as webex:
# ut.printex(webex)
logger.info('CAUGHT2: %r' % (webex,))
rawreturn = webex.get_rawreturn(
DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE
)
success = False
code = webex.code
message = webex.message
jQuery_callback = None
except Exception as ex:
logger.info('CAUGHT2: %r' % (ex,))
# ut.printex(ex)
rawreturn = None
if DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE:
rawreturn = str(traceback.format_exc())
success = False
code = 500
message = str(ex)
# errmsg = str(ex)
# message = 'API error, Python Exception thrown: %s' % (errmsg)
if "'int' object is not iterable" in message:
rawreturn = """
HINT: the input for this call is most likely expected to be a list.
Try adding a comma at the end of the input (to cast the conversion into a list) or encapsulate the input with [].
"""
jQuery_callback = None
# logger.info('RECEIVED FORMAT: %r' % (__format__, ))
if __format__:
# Hack for readable error messages
webreturn = translate_wbia_webreturn(
rawreturn, success, code, message, jQuery_callback
)
webreturn = ut.repr3(ut.from_json(webreturn), strvals=True)
try:
from ansi2html import Ansi2HTMLConverter
conv = Ansi2HTMLConverter()
webreturn = conv.convert(webreturn)
except ImportError as ex:
ut.printex(ex, 'pip install ansi2html', iswarning=True)
webreturn = ut.strip_ansi(webreturn)
webreturn = (
'<p><samp>\n' + html_newlines(webreturn) + '\n</samp></p>'
)
webreturn = (
'<meta http-equiv="Content-Type" content="text/html;charset=ISO-8859-8">\n'
+ webreturn
)
def get_func_href(funcname):
url = (
'http://'
+ request.environ['HTTP_HOST']
+ flask.url_for(funcname)
+ '?__format__=True'
)
return '<a href="{url}">{url}</a>'.format(url=url)
if not success:
webreturn += (
'<pre>See logs for details: %s</pre>'
% get_func_href('get_current_log_text')
)
webreturn += (
'<pre>Might also look into db_info: %s</pre>'
% get_func_href('get_dbinfo')
)
else:
webreturn = translate_wbia_webreturn(
rawreturn, success, code, message, jQuery_callback
)
webreturn = ut.strip_ansi(webreturn)
resp = flask.make_response(webreturn, code)
resp.status_code = code
if not __format__:
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
resp.headers['mimetype'] = 'application/json'
if not ignore_cookie_set:
if __format__:
resp.set_cookie('__format__', 'enabled')
else:
resp.set_cookie('__format__', '', expires=0)
return resp
# return the original unmodified function
if REMOTE_PROXY_URL is None:
return func
else:
return remote_api_wrapper(func)
            return register_closure
return register_api
else:
return ut.dummy_args_decor
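# --- Registration sketch (hypothetical plugin module) ---
# Typical use from a plugin: build the decorator once at import time and
# register each controller function as a JSON API endpoint. The route and
# function below are illustrative only and are left commented out so that
# importing this module has no side effects.
# register_api = get_wbia_flask_api(__name__)
#
# @register_api('/api/plugin/example/', methods=['GET'])
# def wbia_plugin_example(ibs, aid_list=None):
#     return ibs.get_annot_uuids(aid_list)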
def authenticated():
return get_user(username=None) is not None
def authenticate(username, **kwargs):
get_user(username=username, **kwargs)
def deauthenticate():
get_user(username=False)
def get_user(username=None, name=None, organization=None):
USER_KEY = '_USER_'
if USER_KEY not in session:
session[USER_KEY] = None
if username is not None:
if username in [False]:
# De-authenticate
session[USER_KEY] = None
else:
# Authenticate
assert isinstance(username, str), 'user must be a string'
username = username.lower()
session[USER_KEY] = {
'username': username,
'name': name,
'organization': organization,
}
return session[USER_KEY]
def login_required_session(function):
@wraps(function)
def wrap(*args, **kwargs):
if not authenticated():
from wbia.web import appfuncs as appf
refer = flask.request.url.replace(flask.request.url_root, '')
refer = appf.encode_refer_url(refer)
return flask.redirect(flask.url_for('login', refer=refer))
else:
return function(*args, **kwargs)
return wrap
def get_wbia_flask_route(__name__):
"""For function calls that resolve to webpages and return html."""
if __name__ == '__main__':
return ut.dummy_args_decor
if GLOBAL_APP_ENABLED:
def register_route(
rule,
__route_prefix_check__=True,
__route_postfix_check__=True,
__route_authenticate__=True,
__route_microsoft_check__=True,
**options,
):
# GLOBALLY DISABLE LOGINS
__route_authenticate__ = False
if MICROSOFT_API_ENABLED and __route_microsoft_check__:
__route_authenticate__ = False
if not rule.startswith(MICROSOFT_API_PREFIX):
# msg = 'Route rule=%r not allowed with the Microsoft format, ignoring.' % (rule, )
# warnings.warn(msg)
return ut.identity
else:
logger.info('Registering Route rule=%r' % (rule,))
if __route_prefix_check__:
assert not rule.startswith(
'/api/'
), 'Cannot start a route rule (%r) with the prefix "/api/"' % (rule,)
else:
__route_authenticate__ = False
if __route_postfix_check__:
assert rule.endswith('/'), 'A route should always end in a forward-slash'
assert (
'methods' in options
), 'A route should always have a specified methods list'
# if '_' in rule:
# logger.info('CONSIDER RENAMING RULE: %r' % (rule, ))
            # accept args to flask.route
            def register_closure(func):
# make translation function in closure scope
# and register it with flask.
app = get_flask_app()
# login_required = login_required_cas if HAS_FLASK_CAS else login_required_session
login_required = login_required_session
if not __route_authenticate__:
login_required = ut.identity
@app.route(rule, **options)
# @crossdomain(origin='*')
# @authentication_user_only
@login_required
@wraps(func)
def translated_call(**kwargs):
# debug = {'kwargs': kwargs}
try:
# Pipe web input into Python web call
kwargs2 = _process_input(flask.request.args)
kwargs3 = _process_input(flask.request.form)
try:
# kwargs4 = _process_input(flask.request.get_json())
kwargs4 = ut.from_json(flask.request.data)
except Exception:
kwargs4 = {}
kwargs.update(kwargs2)
kwargs.update(kwargs3)
kwargs.update(kwargs4)
jQuery_callback = None
if 'callback' in kwargs and 'jQuery' in kwargs['callback']:
jQuery_callback = str(kwargs.pop('callback', None))
kwargs.pop('_', None)
args = ()
logger.info(
'Processing: %r with args: %r and kwargs: %r'
% (func, args, kwargs)
)
from wbia.web.app import PROMETHEUS
if PROMETHEUS:
ibs = flask.current_app.ibs
tag = request.url_rule.rule
ibs.prometheus_increment_route(tag)
result = func(**kwargs)
except Exception as ex:
rawreturn = str(traceback.format_exc())
success = False
code = 400
message = 'Route error, Python Exception thrown: %r' % (str(ex),)
jQuery_callback = None
result = translate_wbia_webreturn(
rawreturn,
success,
code,
message,
jQuery_callback,
__skip_microsoft_validation__=True,
)
return result
# wrp_getter_cacher = ut.preserve_sig(wrp_getter_cacher, getter_func)
# return the original unmodified function
return func
            return register_closure
return register_route
else:
return ut.dummy_args_decor
def api_remote_wbia(remote_wbia_url, remote_api_func, remote_wbia_port=5001, **kwargs):
import requests
if GLOBAL_APP_ENABLED and GLOBAL_APP is None:
raise ValueError('Flask has not been initialized')
api_name = remote_api_func.__name__
route_list = list(GLOBAL_APP.url_map.iter_rules(api_name))
assert len(route_list) == 1, 'More than one route resolved'
route = route_list[0]
api_route = route.rule
assert api_route.startswith('/api/'), 'Must be an API route'
method_list = sorted(list(route.methods - set(['HEAD', 'OPTIONS'])))
remote_api_method = method_list[0].upper()
assert api_route is not None, 'Route could not be found'
args = (remote_wbia_url, remote_wbia_port, api_route)
remote_api_url = 'http://%s:%s%s' % args
headers = {'Authorization': get_url_authorization(remote_api_url)}
for key in kwargs.keys():
value = kwargs[key]
if isinstance(value, (tuple, list, set)):
value = str(list(value))
kwargs[key] = value
logger.info('[REMOTE] %s' % ('-' * 80,))
logger.info('[REMOTE] Calling remote IBEIS API: %r' % (remote_api_url,))
logger.info('[REMOTE] \tMethod: %r' % (remote_api_method,))
if ut.DEBUG2 or ut.VERBOSE:
logger.info('[REMOTE] \tHeaders: %s' % (ut.repr2(headers),))
logger.info('[REMOTE] \tKWArgs: %s' % (ut.repr2(kwargs),))
# Make request to server
try:
if remote_api_method == 'GET':
req = requests.get(remote_api_url, headers=headers, data=kwargs, verify=False)
elif remote_api_method == 'POST':
req = requests.post(
remote_api_url, headers=headers, data=kwargs, verify=False
)
elif remote_api_method == 'PUT':
req = requests.put(remote_api_url, headers=headers, data=kwargs, verify=False)
elif remote_api_method == 'DELETE':
req = requests.delete(
remote_api_url, headers=headers, data=kwargs, verify=False
)
else:
message = '_api_result got unsupported method=%r' % (remote_api_method,)
raise KeyError(message)
except requests.exceptions.ConnectionError as ex:
message = '_api_result could not connect to server %s' % (ex,)
raise IOError(message)
response = req.text
    converted = ut.from_json(response)
response = converted.get('response', None)
logger.info('[REMOTE] got response')
if ut.DEBUG2:
logger.info('response = %s' % (response,))
return response
##########################################################################################
def dev_autogen_explicit_imports():
r"""
CommandLine:
python -m wbia --tf dev_autogen_explicit_imports
Example:
>>> # SCRIPT
>>> from wbia.control.controller_inject import * # NOQA
>>> dev_autogen_explicit_imports()
"""
import wbia # NOQA
classname = CONTROLLER_CLASSNAME
logger.info(ut.autogen_import_list(classname))
def dev_autogen_explicit_injects():
r"""
CommandLine:
python -m wbia --tf dev_autogen_explicit_injects
Example:
>>> # SCRIPT
>>> from wbia.control.controller_inject import * # NOQA
>>> dev_autogen_explicit_injects()
"""
import wbia # NOQA
import wbia.control.IBEISControl
classname = CONTROLLER_CLASSNAME
regen_command = 'python -m wbia dev_autogen_explicit_injects'
conditional_imports = [
modname
for modname in wbia.control.IBEISControl.AUTOLOAD_PLUGIN_MODNAMES
if isinstance(modname, tuple)
]
source_block = ut.autogen_explicit_injectable_metaclass(
classname, regen_command, conditional_imports
)
dpath = ut.get_module_dir(wbia.control.IBEISControl)
fpath = ut.unixjoin(dpath, '_autogen_explicit_controller.py')
ut.writeto(fpath, source_block, verbose=2)
def make_ibs_register_decorator(modname):
"""builds variables and functions that controller injectable modules need."""
if __name__ == '__main__':
logger.info('WARNING: cannot register controller functions as main')
CLASS_INJECT_KEY = (CONTROLLER_CLASSNAME, modname)
    # Create decorator to inject these functions into the IBEISController
register_ibs_unaliased_method = ut.make_class_method_decorator(
CLASS_INJECT_KEY, modname
)
    # TODO Replace IBEISController INJECTED MODULES with this one
# INJECTED_MODULES.append(sys.modules[modname])
def register_ibs_method(func):
"""registers autogenerated functions with the utool class method injector"""
# func = profile(func)
register_ibs_unaliased_method(func)
# aliastup = (func, '_injected_' + ut.get_funcname(func))
# register_ibs_aliased_method(aliastup)
return func
return CLASS_INJECT_KEY, register_ibs_method
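# --- Plugin-module sketch (hypothetical) ---
# A controller-injectable module typically starts with the two lines below and
# then decorates its functions so they get attached to IBEISController at
# import time. Names are illustrative only and left commented out here.
# CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
#
# @register_ibs_method
# def get_example_property(ibs, aid_list):
#     return [None] * len(aid_list)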
_decors_image = dtool.make_depcache_decors(const.IMAGE_TABLE)
_decors_annot = dtool.make_depcache_decors(const.ANNOTATION_TABLE)
_decors_part = dtool.make_depcache_decors(const.PART_TABLE)
register_preprocs = {
'image': _decors_image['preproc'],
'annot': _decors_annot['preproc'],
'part': _decors_part['preproc'],
}
register_subprops = {
'image': _decors_image['subprop'],
'annot': _decors_annot['subprop'],
'part': _decors_part['subprop'],
}
| 36.005822
| 163
| 0.555708
|
fcf54abe5bc88138e33b038b9014b49771c4b130
| 723
|
py
|
Python
|
src/create_gif.py
|
skilldisk/Handling-Images-using-Pillow-in-Python
|
49660b837b640a6f30c966c2b3f073b62d47a475
|
[
"Apache-2.0"
] | null | null | null |
src/create_gif.py
|
skilldisk/Handling-Images-using-Pillow-in-Python
|
49660b837b640a6f30c966c2b3f073b62d47a475
|
[
"Apache-2.0"
] | null | null | null |
src/create_gif.py
|
skilldisk/Handling-Images-using-Pillow-in-Python
|
49660b837b640a6f30c966c2b3f073b62d47a475
|
[
"Apache-2.0"
] | 1
|
2021-08-30T13:41:36.000Z
|
2021-08-30T13:41:36.000Z
|
from math import sin, pi, cos
from PIL import Image, ImageDraw
# with Image.open('../static/assets/base.png') as base:
images = []
circle_points = []
teta = 0
no_points = 30
delta_teta = 2*pi / no_points
for i in range(no_points+1):
line_base = Image.new('RGBA', (500,500), '#000000ff')
d = ImageDraw.Draw(line_base)
x = 250 + 200 * sin(teta)
y = 250 + 200 * cos(teta)
circle_points.append((x,y))
d.line(circle_points, fill='red', width=2)
images.append(line_base)
teta += delta_teta
for _ in range(3):
images.append(images[-1])
images[0].save('draw_circle.gif',
save_all = True, append_images = images[1:],
optimize = False, duration = 100, loop=0)
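# Note on the save() call above: duration is the per-frame display time in
# milliseconds (100 ms is roughly 10 fps), loop=0 makes the GIF repeat forever,
# and optimize=False skips Pillow's palette optimization pass.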
| 20.657143
| 57
| 0.629322
|
bed8e29b018d4173aaec86b47539f82274e27b0c
| 4,299
|
py
|
Python
|
ldb_reader.py
|
JeffpanUK/NuPyTools
|
4c8180d850fff175e24c302757ae745d62258bc0
|
[
"MIT"
] | 2
|
2017-08-05T17:27:34.000Z
|
2017-12-17T03:43:33.000Z
|
ldb_reader.py
|
JeffpanUK/NuPyTools
|
4c8180d850fff175e24c302757ae745d62258bc0
|
[
"MIT"
] | null | null | null |
ldb_reader.py
|
JeffpanUK/NuPyTools
|
4c8180d850fff175e24c302757ae745d62258bc0
|
[
"MIT"
] | 1
|
2019-10-26T00:03:27.000Z
|
2019-10-26T00:03:27.000Z
|
import os
import time
import re
import codecs
from lxml import etree
class LDBReader(object):
def __init__(self,logger,puncs):
self.logger = logger
self.puncs = puncs
def processBlk(self,blk):
newBlk=[]
for line in blk:
if "".join(line[:14]) == '<LD_SE_SYNTAX>':
#this block will crash lxml parsing, and no use in evaluation, so we ignore it
pass
else:
newBlk.append(line)
xml = "\n".join(newBlk)
parser = etree.XMLParser(recover=True)
root = etree.fromstring(xml,parser=parser)
phrases = []
words = []
spts = []
phrase=[]
for w in root.iterfind('WordRecord'):
isPhrase = False
isWord = False
for t in w:
if t.tag == 'LD_W_ORTH':
try:
text = t.text.strip()
except:
text = " "
text = re.sub(u"\u263ccomma\u263c","",text)
text = re.sub("[ \\-]","",text)
if len(text) > 0:
newtext = []
for c in text:
if c not in self.puncs:
newtext.append(c)
text = "".join(newtext)
elif t.tag == 'LD_W_TYPE':
wtype = t.text.strip()
if wtype == 'WORD_PHRASE':
isPhrase = True
break
elif wtype == 'WORD_DCT' or wtype == 'WORD_CROSSTOKEN':
isWord = True
word = text
phrase.append(word)
elif t.tag == 'LD_W_PHON':
spt = t.text.strip()
if isPhrase:
if len(phrase) != 0:
phrases.append("".join(phrase))
phrase =[]
elif isWord:
if word != "":
words.append(word)
sptA = re.split("-",spt)
spts.extend(sptA)
if len(phrase) != 0:
phrases.append("".join(phrase))
phrase =[]
#print "WORD :%s" % (" ".join(words))
#print "PHRASE:%s" % (" ".join(phrases))
#print "SPT :%s" % ("-".join(spts))
return (phrases, words, spts)
def processLDBFile(self,ldbFile):
f = open(ldbFile,'r')
blk=f.readlines()
f.close()
sent = self.processBlk(blk)
return sent
def process(self,folder):
if not os.path.isdir(folder):
self.logger.error("LDB Folder:%s is invalid" % folder)
return
else:
self.logger.info("Processing LDB Folder:%s" % folder)
flist = os.listdir(folder)
#filter all ldb files
ldblist = []
for fn in flist:
fnA = re.split("\\.",fn)
if fnA[-1] != 'ldb':
continue
else:
ldblist.append(os.path.join(folder,fn))
#sort files by suffix in file name
ldblist = sorted(ldblist,key=lambda fn:int(re.split("[._]",fn)[-2]))
sents = []
count = 0
total = len(ldblist)
for fn in ldblist:
count+=1
self.logger.debug("File:%s, Status:%0.2f(%d/%d)" % (fn,float(count)/total,count,total))
sent = self.processLDBFile(fn)
sents.append(sent)
return sents
def processRootFolder(self, folder):
if not os.path.isdir(folder):
self.logger.error("ROOT Folder:%s is invalid" % folder)
return
else:
self.logger.info("Processing ROOT Folder:%s, Only files in sub folders will be processed" % folder)
flist = os.listdir(folder)
subs = []
for f in flist:
if not os.path.isdir(os.path.join(folder, f)):
continue
subs.append(os.path.join(folder, f))
allSents = []
for sfolder in subs:
sents = self.process(sfolder)
allSents.extend(sents)
self.logger.info("Total sents:%d" % len(allSents))
return allSents
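# --- Usage sketch (not part of the original module) ---
# Hypothetical driver showing how the reader is wired together; the folder
# path and punctuation set are assumptions.
if __name__ == "__main__":
  import logging
  logging.basicConfig(level=logging.INFO)
  demo_logger = logging.getLogger("ldb_reader")
  demo_puncs = set(u",.!?;:")
  reader = LDBReader(demo_logger, demo_puncs)
  all_sents = reader.processRootFolder("./ldb_root")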
| 30.928058
| 111
| 0.457548
|
0222a959ad5909f26d0020e1376a3d95c6d04135
| 2,916
|
py
|
Python
|
venv/lib/python2.7/site-packages/github/tests/Issue140.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
venv/lib/python2.7/site-packages/github/tests/Issue140.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/github/tests/Issue140.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import Framework
import github
class Issue140(Framework.TestCase): # https://github.com/jacquev6/PyGithub/issues/140
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_repo("twitter/bootstrap")
def testGetDirContentsThenLazyCompletionOfFile(self):
contents = self.repo.get_dir_contents("/js")
self.assertEqual(len(contents), 15)
n = 0
for content in contents:
if content.path == "js/bootstrap-affix.js":
self.assertEqual(len(content.content), 4722) # Lazy completion
n += 1
elif content.path == "js/tests":
self.assertEqual(content.content, None) # No completion at all
n += 1
self.assertEqual(n, 2)
def testGetFileContents(self):
contents = self.repo.get_file_contents("/js/bootstrap-affix.js")
self.assertEqual(contents.encoding, "base64")
self.assertEqual(contents.url, "https://api.github.com/repos/twitter/bootstrap/contents/js/bootstrap-affix.js")
self.assertEqual(len(contents.content), 4722)
def testGetDirContentsWithRef(self):
self.assertEqual(len(self.repo.get_dir_contents("/js", "8c7f9c66a7d12f47f50618ef420868fe836d0c33")), 15)
| 54
| 119
| 0.502743
|
079c6119547bc4d86da2c3b98ba4a3066155a93d
| 182
|
py
|
Python
|
currencies/conf.py
|
CargobaseDev/django-currencies
|
ff722618ad5248da3b592c96c186cc93846796dc
|
[
"BSD-3-Clause"
] | 8
|
2015-06-07T02:25:23.000Z
|
2020-10-06T05:19:59.000Z
|
currencies/conf.py
|
CargobaseDev/django-currencies
|
ff722618ad5248da3b592c96c186cc93846796dc
|
[
"BSD-3-Clause"
] | 1
|
2015-04-03T05:40:04.000Z
|
2015-04-14T10:44:35.000Z
|
currencies/conf.py
|
CargobaseDev/django-currencies
|
ff722618ad5248da3b592c96c186cc93846796dc
|
[
"BSD-3-Clause"
] | 4
|
2017-09-23T09:02:51.000Z
|
2021-06-25T05:21:12.000Z
|
# -*- coding: utf-8 -*-
from django.conf import settings
SESSION_PREFIX = getattr(settings, 'CURRENCY_SESSION_PREFIX', 'session')
SESSION_KEY = '%s:currency_code' % SESSION_PREFIX
| 26
| 72
| 0.747253
|
cb6db1b3a9b7437fd6be83069a0f639cb8b1a679
| 64
|
py
|
Python
|
T07-15/program.py
|
miguelgaoreiss/SSof-Project1920
|
0bf74c264e06966931d6a2e0b42134dfddc32eb4
|
[
"MIT"
] | 2
|
2019-11-20T19:26:07.000Z
|
2019-11-22T00:42:23.000Z
|
T07-15/program.py
|
miguelgaoreiss/SSof-Project1920
|
0bf74c264e06966931d6a2e0b42134dfddc32eb4
|
[
"MIT"
] | 2
|
2019-11-28T05:21:24.000Z
|
2019-11-28T05:21:58.000Z
|
T07-15/program.py
|
miguelgaoreiss/SSof-Project1920
|
0bf74c264e06966931d6a2e0b42134dfddc32eb4
|
[
"MIT"
] | 25
|
2019-11-27T01:40:56.000Z
|
2019-12-04T23:38:59.000Z
|
a = source()
if True:
b = source2()
c = a + b
sink(c)
| 8
| 17
| 0.453125
|
84cf9ba064d7f8cd63b7c98552fdbe83a2536203
| 4,141
|
py
|
Python
|
lenet.py
|
jonad/TrafficSignDetection
|
024e063f4b776c2489d2a0f7edcaa092961e8cca
|
[
"MIT"
] | null | null | null |
lenet.py
|
jonad/TrafficSignDetection
|
024e063f4b776c2489d2a0f7edcaa092961e8cca
|
[
"MIT"
] | null | null | null |
lenet.py
|
jonad/TrafficSignDetection
|
024e063f4b776c2489d2a0f7edcaa092961e8cca
|
[
"MIT"
] | null | null | null |
from utils import *
import tensorflow.contrib.slim as slim
import tensorflow as tf
import numpy as np
class LeNet():
def __init__(self,x, mu, sigma, bias_value,
conv1_params, conv2_params, p1_params,
p2_params, fc1_params, fc2_params, fc3_params, hold_prob):
self.mu = mu
self.sigma = sigma
self.x = x
self.conv1_params = conv1_params
self.conv2_params = conv2_params
self.p1_params = p1_params
self.p2_params = p2_params
self.fc1_params = fc1_params
self.fc2_params = fc2_params
self.fc3_params = fc3_params
self.bias_value = bias_value
self.hold_prob = hold_prob
self.logits = self._build_neural_net()
def _build_neural_net(self):
filter_height_c1, filter_width_c1, channel_in_c1, channel_out_c1, \
stride_c1, padding_c1 = self.conv1_params
shape_1 = [filter_height_c1, filter_width_c1, channel_in_c1, channel_out_c1]
# First layer: convolutional layer
conv_1 = convolutional_layer(self.x, shape_1,
self.mu, self.sigma, self.bias_value,
stride_c1, padding_c1, name="conv_1")
# First layer: pooling layer
ksize_p1, stride_p1, padding_p1 = self.p1_params
conv_1_pooling = max_pooling(conv_1, ksize_p1, stride_p1, padding_p1, name="max_pool_1")
# Second layer: convolutional layer
filter_height_c2, filter_width_c2, channel_in_c2, channel_out_c2, \
stride_c2, padding_c2 = self.conv2_params
shape_2 = [filter_height_c2, filter_width_c2, channel_in_c2, channel_out_c2]
conv_2 = convolutional_layer(conv_1_pooling, shape_2,
self.mu, self.sigma, self.bias_value,
stride_c2, padding_c2, name="conv_2")
# Second layer: pooling layer
ksize_p2, stride_p2, padding_p2 = self.p2_params
conv_2_pooling = max_pooling(conv_2, ksize_p2, stride_p2, padding_p2, name="max_pool_2")
        # Flatten layer
        #width, height, channel = conv_2_pooling.get_shape()[1:]
        shape_4 = conv_2_pooling.get_shape().as_list()  # e.g. [None, height, width, channels]
dim = np.prod(shape_4[1:])
#dimension = width*height*channel
conv_flat = tf.reshape(conv_2_pooling, [-1, dim])
# fully connected layer 1
output_activations_1 = self.fc1_params
fully_connected_layer_1 = tf.nn.relu(fully_connected_layer(conv_flat, output_activations_1, self.mu, self.sigma, self.bias_value,
name="fully_connected_1"))
# dropout
fully_connected_layer_1 = tf.nn.dropout(fully_connected_layer_1, keep_prob=self.hold_prob)
# fully connected layer 2
output_activations_2 = self.fc2_params
fully_connected_layer_2 = tf.nn.relu(
fully_connected_layer(fully_connected_layer_1, output_activations_2, self.mu, self.sigma, self.bias_value,
name="fully_connected_2" ))
fully_connected_layer_2 = tf.nn.dropout(fully_connected_layer_2, keep_prob=self.hold_prob)
# fully connected layer 3
output_activations_3 = self.fc3_params
fully_connected_layer_3 = fully_connected_layer(fully_connected_layer_2, output_activations_3, self.mu, self.sigma, self.bias_value,
name="fully_connected_3")
return fully_connected_layer_3
def get_logits(self):
return self.logits
def get_summary(self):
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
| 34.798319
| 140
| 0.588264
|
047778a629f1face6df9f3348a55f3c5790fdb4f
| 11,036
|
py
|
Python
|
cirq/ops/swap_gates.py
|
alex-treebeard/Cirq
|
10594c0edf7a4c26d5d21f985c6dc391197d3075
|
[
"Apache-2.0"
] | 1
|
2021-03-07T19:34:28.000Z
|
2021-03-07T19:34:28.000Z
|
cirq/ops/swap_gates.py
|
alex-treebeard/Cirq
|
10594c0edf7a4c26d5d21f985c6dc391197d3075
|
[
"Apache-2.0"
] | 4
|
2021-01-11T10:35:37.000Z
|
2021-01-28T19:17:02.000Z
|
cirq/ops/swap_gates.py
|
alex-treebeard/Cirq
|
10594c0edf7a4c26d5d21f985c6dc391197d3075
|
[
"Apache-2.0"
] | 1
|
2021-12-30T21:50:00.000Z
|
2021-12-30T21:50:00.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SWAP and ISWAP gates.
This module creates Gate instances for the following gates:
SWAP: the swap gate.
ISWAP: a swap gate with a phase on the swapped subspace.
Each of these are implemented as EigenGates, which means that they can be
raised to a power (i.e. cirq.ISWAP**0.5). See the definition in EigenGate.
"""
from typing import Optional, Tuple, TYPE_CHECKING
import numpy as np
import sympy
from cirq import protocols, value
from cirq._compat import proper_repr
from cirq._doc import document
from cirq.ops import common_gates, gate_features, eigen_gate
if TYPE_CHECKING:
import cirq
class SwapPowGate(
eigen_gate.EigenGate, gate_features.TwoQubitGate, gate_features.InterchangeableQubitsGate
):
"""The SWAP gate, possibly raised to a power. Exchanges qubits.
SwapPowGate()**t = SwapPowGate(exponent=t) and acts on two qubits in the
computational basis as the matrix:
[[1, 0, 0, 0],
[0, g·c, -i·g·s, 0],
[0, -i·g·s, g·c, 0],
[0, 0, 0, 1]]
where:
c = cos(π·t/2)
s = sin(π·t/2)
g = exp(i·π·t/2).
`cirq.SWAP`, the swap gate, is an instance of this gate at exponent=1.
"""
def _decompose_(self, qubits):
"""See base class."""
a, b = qubits
yield common_gates.CNOT(a, b)
yield common_gates.CNotPowGate(exponent=self._exponent, global_shift=self.global_shift).on(
b, a
)
yield common_gates.CNOT(a, b)
def _eigen_components(self):
# yapf: disable
return [
(0, np.array([[1, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 1]])),
(1, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
# yapf: enable
def _trace_distance_bound_(self) -> Optional[float]:
if self._is_parameterized_():
return None
return abs(np.sin(self._exponent * 0.5 * np.pi))
def _has_stabilizer_effect_(self) -> Optional[bool]:
if self._is_parameterized_():
return None
return self.exponent % 1 == 0
def _act_on_(self, args):
from cirq import ops, sim, protocols
if isinstance(args, (sim.ActOnStabilizerCHFormArgs, sim.ActOnCliffordTableauArgs)):
if not self._has_stabilizer_effect_():
return NotImplemented
if isinstance(args, sim.ActOnStabilizerCHFormArgs):
args.state.omega *= 1j ** (2 * self.global_shift * self._exponent)
if self._exponent % 2 == 1:
protocols.act_on(ops.CNOT, args)
args.axes = args.axes[::-1]
protocols.act_on(ops.CNOT, args)
args.axes = args.axes[::-1]
protocols.act_on(ops.CNOT, args)
# An even exponent does not change anything except the global phase above.
return True
return NotImplemented
def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> Optional[np.ndarray]:
if self._exponent != 1:
return NotImplemented
zo = args.subspace_index(0b01)
oz = args.subspace_index(0b10)
args.available_buffer[zo] = args.target_tensor[zo]
args.target_tensor[zo] = args.target_tensor[oz]
args.target_tensor[oz] = args.available_buffer[zo]
p = 1j ** (2 * self._exponent * self._global_shift)
if p != 1:
args.target_tensor *= p
return args.target_tensor
def _pauli_expansion_(self) -> value.LinearDict[str]:
if protocols.is_parameterized(self):
return NotImplemented
global_phase = 1j ** (2 * self._exponent * self._global_shift)
swap_phase = 1j ** self._exponent
c = -1j * swap_phase * np.sin(np.pi * self._exponent / 2) / 2
return value.LinearDict(
{
'II': global_phase * (1 - c),
'XX': global_phase * c,
'YY': global_phase * c,
'ZZ': global_phase * c,
}
)
def _circuit_diagram_info_(
self, args: 'cirq.CircuitDiagramInfoArgs'
) -> 'cirq.CircuitDiagramInfo':
if not args.use_unicode_characters:
return protocols.CircuitDiagramInfo(
wire_symbols=('Swap', 'Swap'), exponent=self._diagram_exponent(args)
)
return protocols.CircuitDiagramInfo(
wire_symbols=('×', '×'), exponent=self._diagram_exponent(args)
)
def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:
if self._exponent != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('swap {0},{1};\n', qubits[0], qubits[1])
def _quil_(
self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'
) -> Optional[str]:
if self._exponent == 1:
return formatter.format('SWAP {0} {1}\n', qubits[0], qubits[1])
return formatter.format(
'PSWAP({0}) {1} {2}\n', self._exponent * np.pi, qubits[0], qubits[1]
)
def __str__(self) -> str:
if self._exponent == 1:
return 'SWAP'
return f'SWAP**{self._exponent}'
def __repr__(self) -> str:
e = proper_repr(self._exponent)
if self._global_shift == 0:
if self._exponent == 1:
return 'cirq.SWAP'
return f'(cirq.SWAP**{e})'
return f'cirq.SwapPowGate(exponent={e}, global_shift={self._global_shift!r})'
class ISwapPowGate(
eigen_gate.EigenGate, gate_features.InterchangeableQubitsGate, gate_features.TwoQubitGate
):
"""Rotates the |01⟩ vs |10⟩ subspace of two qubits around its Bloch X-axis.
When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. More
generally, this gate's matrix is defined as follows:
ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)
which is given by the matrix:
[[1, 0, 0, 0],
[0, c, i·s, 0],
[0, i·s, c, 0],
[0, 0, 0, 1]]
where:
c = cos(π·t/2)
s = sin(π·t/2)
`cirq.ISWAP`, the swap gate that applies i to the |01⟩ and |10⟩ states,
is an instance of this gate at exponent=1.
References:
"What is the matrix of the iSwap gate?"
https://quantumcomputing.stackexchange.com/questions/2594/
"""
def _eigen_components(self):
# yapf: disable
return [
(0, np.diag([1, 0, 0, 1])),
(+0.5, np.array([[0, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 0]])),
(-0.5, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
# yapf: enable
def _decompose_(self, qubits):
a, b = qubits
yield common_gates.CNOT(a, b)
yield common_gates.H(a)
yield common_gates.CNOT(b, a)
yield common_gates.ZPowGate(exponent=self._exponent / 2, global_shift=self.global_shift).on(
a
)
yield common_gates.CNOT(b, a)
yield common_gates.ZPowGate(
exponent=-self._exponent / 2, global_shift=-self.global_shift
).on(a)
yield common_gates.H(a)
yield common_gates.CNOT(a, b)
def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> Optional[np.ndarray]:
if self._exponent != 1:
return NotImplemented
zo = args.subspace_index(0b01)
oz = args.subspace_index(0b10)
args.available_buffer[zo] = args.target_tensor[zo]
args.target_tensor[zo] = args.target_tensor[oz]
args.target_tensor[oz] = args.available_buffer[zo]
args.target_tensor[zo] *= 1j
args.target_tensor[oz] *= 1j
p = 1j ** (2 * self._exponent * self._global_shift)
if p != 1:
args.target_tensor *= p
return args.target_tensor
def _pauli_expansion_(self) -> value.LinearDict[str]:
if protocols.is_parameterized(self):
return NotImplemented
global_phase = 1j ** (2 * self._exponent * self._global_shift)
angle = np.pi * self._exponent / 4
c, s = np.cos(angle), np.sin(angle)
return value.LinearDict(
{
'II': global_phase * c * c,
'XX': global_phase * c * s * 1j,
'YY': global_phase * s * c * 1j,
'ZZ': global_phase * s * s,
}
)
def _circuit_diagram_info_(
self, args: 'cirq.CircuitDiagramInfoArgs'
) -> 'cirq.CircuitDiagramInfo':
return protocols.CircuitDiagramInfo(
wire_symbols=('iSwap', 'iSwap'), exponent=self._diagram_exponent(args)
)
def __str__(self) -> str:
if self._exponent == 1:
return 'ISWAP'
return f'ISWAP**{self._exponent}'
def __repr__(self) -> str:
e = proper_repr(self._exponent)
if self._global_shift == 0:
if self._exponent == 1:
return 'cirq.ISWAP'
return f'(cirq.ISWAP**{e})'
return f'cirq.ISwapPowGate(exponent={e}, global_shift={self._global_shift!r})'
def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter') -> str:
if self._exponent == 1:
return formatter.format('ISWAP {0} {1}\n', qubits[0], qubits[1])
return formatter.format('XY({0}) {1} {2}\n', self._exponent * np.pi, qubits[0], qubits[1])
def riswap(rads: value.TParamVal) -> ISwapPowGate:
"""Returns gate with matrix exp(+i angle_rads (X⊗X + Y⊗Y) / 2)."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return ISwapPowGate() ** (2 * rads / pi)
SWAP = SwapPowGate()
document(
SWAP,
"""The swap gate.
Matrix:
```
[[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]]
```
""",
)
ISWAP = ISwapPowGate()
document(
ISWAP,
"""The iswap gate.
Matrix:
```
[[1, 0, 0, 0],
[0, 0, i, 0],
[0, i, 0, 0],
[0, 0, 0, 1]]
```
""",
)
| 32.747774
| 100
| 0.560167
|
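As an aside, a short illustrative snippet using the gates defined above, raised to a power inside a circuit (assumes a standard cirq installation):
import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([
    cirq.SWAP(a, b) ** 0.5,   # SwapPowGate with exponent 0.5
    cirq.ISWAP(a, b) ** -1,   # inverse iSWAP
])
print(circuit)
print(cirq.unitary(cirq.SWAP ** 0.5))  # 4x4 matrix built from the eigen components above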
974e6829ad215986d7398d9c9663e70c20c2669d
| 13,389
|
py
|
Python
|
mmf/datasets/multi_dataset_loader.py
|
sisilmehta2000/mmf
|
ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4
|
[
"BSD-3-Clause"
] | 1,928
|
2020-05-07T19:00:53.000Z
|
2022-03-31T17:02:59.000Z
|
mmf/datasets/multi_dataset_loader.py
|
sisilmehta2000/mmf
|
ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4
|
[
"BSD-3-Clause"
] | 914
|
2020-05-07T18:36:26.000Z
|
2022-03-31T05:45:26.000Z
|
mmf/datasets/multi_dataset_loader.py
|
sisilmehta2000/mmf
|
ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4
|
[
"BSD-3-Clause"
] | 490
|
2020-05-07T20:05:10.000Z
|
2022-03-31T14:17:23.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDatasetLoader class is used by DatasetLoader class to load multiple datasets
and more granular
"""
import logging
import warnings
from typing import Dict, Iterator
import torch
from mmf.common.sample import SampleList, convert_batch_to_sample_list
from mmf.datasets import iteration_strategies
from mmf.utils.build import build_dataloader_and_sampler, build_dataset
from mmf.utils.dataset import dataset_list_from_config
from mmf.utils.distributed import (
broadcast_scalar,
get_world_size,
is_dist_initialized,
is_main,
is_xla,
)
from mmf.utils.general import get_batch_size, get_current_device
from omegaconf import OmegaConf
from torch.utils.data.dataloader import DataLoader, Sampler
logger = logging.getLogger(__name__)
class MultiDataLoader:
def __init__(
self,
loaders: Dict[str, DataLoader],
iteration_strategy: iteration_strategies.IterationStrategy = None,
):
if loaders is None or len(loaders) == 0:
warnings.warn(
"Empty loaders passed into MultiDataLoader. This can have "
"unintended consequences."
)
if iteration_strategy is None:
iteration_strategy = iteration_strategies.RoundRobinIterationStrategy(
OmegaConf.create(), loaders
)
self._iteration_strategy = iteration_strategy
self._loaders = loaders
self._is_main = is_main()
self._num_datasets = len(self.loaders)
self.dataset_list = list(loaders.keys())
self._iterators = {}
self._finished_iterators = {}
self.current_index = 0
self.set_lengths()
self.set_samplers()
def set_lengths(self):
self._total_length = 0
for loader in self.loaders.values():
# Some loaders might not have dataset attribute
# set, in this case we won't consider them in
# dataset lengths.
if not hasattr(loader, "dataset"):
continue
dataset_instance = loader.dataset
if hasattr(dataset_instance, "__len__"):
dataset_instance_length = len(dataset_instance)
assert dataset_instance_length, f"dataset: {self.dataset_type} is empty"
self._total_length += dataset_instance_length
def set_samplers(self):
self.samplers: Dict[str, Sampler] = {}
for key, loader in self.loaders.items():
if hasattr(loader, "sampler"):
self.samplers[key] = loader.sampler
def get_datasets(self):
return [loader.dataset for loader in self.loaders.values()]
@property
def loaders(self) -> Dict[str, DataLoader]:
return self._loaders
@property
def samplers(self) -> Dict[str, Sampler]:
return self._samplers
@samplers.setter
def samplers(self, samplers: Dict[str, Sampler]):
self._samplers = samplers
@property
def num_datasets(self) -> int:
return self._num_datasets
@property
def iterators(self) -> Dict[str, Iterator[SampleList]]:
return self._iterators
@iterators.setter
def iterators(self, iterators: Dict[str, Iterator[SampleList]]):
self._iterators = iterators
@property
def current_loader(self) -> DataLoader:
return self.loaders[self.current_dataset_name]
@property
def iteration_strategy(self) -> iteration_strategies.IterationStrategy:
return self._iteration_strategy
@property
def current_iterator(self) -> DataLoader:
return self.iterators[self.current_dataset_name]
@property
def current_dataset_name(self) -> str:
return self.dataset_list[self.current_index]
@property
def current_dataset(self) -> torch.utils.data.Dataset:
if hasattr(self.current_loader, "dataset"):
return self.current_loader.dataset
else:
return None
@property
def first_loader(self) -> DataLoader:
return list(self.loaders.values())[0]
def __len__(self) -> int:
# Since, this is iterator, we need to return total length == number of batches
# and as get_batch_size returns per GPU batch size, it needs to be multiplied
# by world size
batch_size = get_batch_size() * get_world_size()
        # Changed the length to accommodate drop_last == True
# drop_last is required if the batch is split into multiple cores
# some of the cores may not have enough examples.
if is_xla():
logging.info(
"drop_last is set to True to avoid uneven dimension shapes "
"across cores."
)
return (self._total_length) // batch_size
else:
# This assumes drop_last=False for all loaders. See also
# build_dataloader_and_sampler().
return (self._total_length + batch_size - 1) // batch_size
def __iter__(self):
# Clear off old iterators
self._finished_iterators = {}
self.iterators = {}
for key, loader in self.loaders.items():
self.iterators[key] = iter(loader)
self.change_dataloader()
return self
def __next__(self) -> SampleList:
"""Calculation of next batch is performed using following logic.
Current chosen iterator is set in the change_dataloader function
        based on the chosen iteration strategy, which is called every time
prepare_batch is called.
If we get the next batch from iterator without any StopIteration exception,
we return it as it is. Otherwise, we have two cases:
        1. In some iteration strategies (for example, size proportional), each dataset
        needs the same number of epochs at any given time, so we need to raise a
        StopIteration exception when all iterators are finished. In turn, this
        hands control back to __iter__, which reignites all of the iterators. The code
        will not reach __iter__ unless all iterators are exhausted. An iteration
        strategy should specify this behavior through the `should_exhaust_all_iterators`
        property.
2. In other cases of iteration strategies, epochs don't make sense.
Think of a case of random (equal) proportional sampling for dataset x and y
        where x is half the size of y. When x has completed its 2 epochs, y will
        have completed only 1 epoch. **So please don't use max_epochs or epoch
based training in this case as it won't be honored**. If an iterator is
finished, we just reignite it in this case and finished iterators
variable isn't used. This means that this case will never reach the
__iter__ function ever again.
Returns:
SampleList: sample list instance from currently selected dataset
"""
try:
next_batch = next(self.current_iterator)
except StopIteration:
if self.iteration_strategy.should_exhaust_all_iterators:
self._finished_iterators[self.current_dataset_name] = 1
if len(self._finished_iterators) == self.num_datasets:
raise
else:
self.change_dataloader()
next_batch = next(self.current_iterator)
else:
iterator = iter(self.current_loader)
self.iterators[self.current_dataset_name] = iterator
next_batch = next(self.current_iterator)
# Save dataset name and dataset type beforehand as
# prepare_data will change the current index
current_dataset_name = self.current_dataset_name
current_dataset_type = self.current_dataset.dataset_type
next_batch = self.prepare_batch(next_batch)
next_batch = convert_batch_to_sample_list(next_batch)
next_batch.dataset_name = current_dataset_name
next_batch.dataset_type = current_dataset_type
return next_batch
def change_dataloader(self):
choice = 0
if self.num_datasets <= 1:
self.current_index = choice
return
if self._is_main:
choice = self.iteration_strategy()
# self._finished_iterators will always be empty in case of
# non-proportional (equal) sampling
while self.dataset_list[choice] in self._finished_iterators:
choice = self.iteration_strategy()
choice = broadcast_scalar(choice, 0, device=get_current_device())
self.current_index = choice
def prepare_batch(self, batch: SampleList) -> SampleList:
if self.current_dataset and hasattr(self.current_dataset, "prepare_batch"):
batch = self.current_dataset.prepare_batch(batch)
self.change_dataloader()
return batch
def seed_sampler(self, epoch: int):
if is_dist_initialized():
for sampler in self.samplers.values():
if sampler is not None and hasattr(sampler, "set_epoch"):
sampler.set_epoch(epoch)
# TODO: Deprecate in favor of MultiDataModule
class MultiDatasetLoader(MultiDataLoader):
"""
MultiDatasetLoader class that is used for training on multiple datasets together.
"""
def __init__(self, dataset_type: str = "train"):
self._dataset_type = dataset_type
self._datasets = []
super().__init__({})
@property
def dataset_type(self):
return self._dataset_type
@property
def datasets(self):
return self._datasets
def load(self, config):
self.build_datasets(config)
self.build_dataloaders()
self.set_lengths()
def build_datasets(self, config):
self._datasets = []
self.config = config
self._given_datasets = dataset_list_from_config(self.config)
for dataset in self._given_datasets:
if dataset in self.config.dataset_config:
dataset_config = self.config.dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)
if dataset_instance is None:
continue
self.datasets.append(dataset_instance)
self.dataset_list.append(dataset)
self._num_datasets = len(self.datasets)
self.current_index = 0
self._infer_dataset_probabilities()
def build_dataloaders(self):
assert len(self._datasets) > 0, "Call build_datasets first"
for dataset_instance in self.datasets:
loader_instance, _ = build_dataloader_and_sampler(
dataset_instance, self.config.training
)
sampler_instance = loader_instance.sampler
self.loaders[dataset_instance.name] = loader_instance
self.samplers[dataset_instance.name] = sampler_instance
self.current_loader = self.loaders[self.current_dataset_name]
def verbose_dump(self, *args, **kwargs):
self._chosen_dataset.verbose_dump(*args, **kwargs)
# Kept for backwards compatibility for now
# TODO: Remove in future.
def _infer_dataset_probabilities(self):
from mmf.utils.configuration import get_global_config
training = get_global_config("training")
proportional_sampling = training.get("dataset_size_proportional_sampling", True)
if proportional_sampling is True:
strategy = iteration_strategies.SizeProportionalIterationStrategy
self._iteration_strategy = strategy(OmegaConf.create(), self.loaders)
else:
self._iteration_strategy = iteration_strategies.RandomIterationStrategy(
OmegaConf.create(), self.loaders
)
multitasking = get_global_config("multitasking")
multitasking_enabled = multitasking.get("enabled", False)
assert (
proportional_sampling is True or training.get("max_epochs", None) is None
), "Epoch based training can only be used with size proportional sampling"
assert not (proportional_sampling and multitasking_enabled), (
"Multitasking (manually-specified) per-dataset ratios cannot be used "
"with size proportional sampling"
)
if multitasking_enabled and "sampling_ratios" in multitasking:
self._iteration_strategy = iteration_strategies.RatiosIterationStrategy(
OmegaConf.create(
{
"sampling_ratios": multitasking.sampling_ratios,
"datasets": self._given_datasets,
}
),
self._loaders,
)
elif proportional_sampling is True:
strategy = iteration_strategies.SizeProportionalIterationStrategy
self._iteration_strategy = strategy(OmegaConf.create(), self.loaders)
else:
self._iteration_strategy = iteration_strategies.RandomIterationStrategy(
OmegaConf.create(), self.loaders
)
| 36.284553
| 88
| 0.651804
|
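As an aside, the exhaust-all-iterators control flow described in MultiDataLoader.__next__ reduced to plain Python; this is illustrative only and does not use the mmf API:
def round_robin_exhaust_all(loaders):
    # Cycle through named loaders until every one has raised StopIteration.
    iterators = {name: iter(loader) for name, loader in loaders.items()}
    finished = set()
    names = list(loaders)
    i = 0
    while len(finished) < len(names):
        name = names[i % len(names)]
        i += 1
        if name in finished:
            continue
        try:
            yield name, next(iterators[name])
        except StopIteration:
            finished.add(name)

# Usage with two uneven "datasets":
for name, batch in round_robin_exhaust_all({"x": range(2), "y": range(4)}):
    print(name, batch)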
8767a7c21922c77c77faea5f03cdc0e44a174b1c
| 1,768
|
py
|
Python
|
main.py
|
ctulhu31/siglev
|
ff30ae0aa09050cfae480b0a43127b20e500ddfe
|
[
"MIT"
] | null | null | null |
main.py
|
ctulhu31/siglev
|
ff30ae0aa09050cfae480b0a43127b20e500ddfe
|
[
"MIT"
] | null | null | null |
main.py
|
ctulhu31/siglev
|
ff30ae0aa09050cfae480b0a43127b20e500ddfe
|
[
"MIT"
] | null | null | null |
import math
class part:
def __init__(self, name):
self.__name = name
self.__val1 = None
self.__val2 = None
self.__coefficient = 0.0
def addValue(self, val, diff=1.0):
if self.__val1 is None:
self.__val1 = val
elif self.__val2 is None:
self.__val2 = val
self.__coefficient += math.fabs(self.__val2 - self.__val1) / math.fabs(diff)
else:
self.__val1 = self.__val2
self.__val2 = val
self.__coefficient += math.fabs(self.__val2 - self.__val1) / math.fabs(diff)
    def getName(self):
return self.__name
def getCoefficient(self):
return str('%+10f' % self.__coefficient)
def readdata(path):
data = open(path, 'r', encoding='utf-8')
xc = 0
datalist = []
res = []
for i in data:
z = i.replace('\n', '').split(' ')
if xc == 0:
for j in range(1, len(z)):
datalist.append(part(z[j]))
xc += 1
elif xc == 1:
res.append(float(z[0]))
for j in range(1, len(z)):
datalist[j - 1].addValue(float(z[j]))
xc += 1
else:
if len(res) == 2:
res[0] = res[1]
res[1] = float(z[0])
else:
res.append(float(z[0]))
for j in range(1, len(z)):
if not float(res[1] - res[0]) == 0:
datalist[j - 1].addValue(float(z[j]), float(res[1] - res[0]))
return datalist
def printresult(datalist):
for i in datalist:
print(i.getName() + ' ' + i.getCoefficient())
def main():
datapath = input('Enter path to data file: ')
printresult(readdata(datapath))
main()
| 28.063492
| 88
| 0.498303
|
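As an aside, a small usage sketch of the part class defined above, assuming the class is in scope; the values are made up, and the coefficient accumulates |Δvalue| / |Δargument| over successive samples:
p = part('signal')
p.addValue(1.0)             # first sample, only stored
p.addValue(3.0, diff=1.0)   # adds |3 - 1| / 1
p.addValue(6.0, diff=1.0)   # adds |6 - 3| / 1
print(p.getName(), p.getCoefficient())  # signal  +5.000000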
be719e579d4a2da82f4563a0bd373da6b7365bd3
| 703
|
py
|
Python
|
jesse/indicators/linearreg.py
|
h0ke/jesse
|
02dbf2b5df3a970eed18b276d5e3bcf8fb3f9220
|
[
"MIT"
] | 4
|
2021-02-23T18:23:58.000Z
|
2021-10-10T07:32:41.000Z
|
jesse/indicators/linearreg.py
|
h0ke/jesse
|
02dbf2b5df3a970eed18b276d5e3bcf8fb3f9220
|
[
"MIT"
] | 1
|
2021-06-05T19:59:56.000Z
|
2021-06-05T19:59:56.000Z
|
jesse/indicators/linearreg.py
|
h0ke/jesse
|
02dbf2b5df3a970eed18b276d5e3bcf8fb3f9220
|
[
"MIT"
] | 2
|
2021-04-30T06:49:26.000Z
|
2022-01-24T09:24:35.000Z
|
from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
def linearreg(candles: np.ndarray, period=14, source_type="close", sequential=False) -> Union[float, np.ndarray]:
"""
LINEARREG - Linear Regression
:param candles: np.ndarray
:param period: int - default: 14
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: float | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
source = get_candle_source(candles, source_type=source_type)
res = talib.LINEARREG(source, timeperiod=period)
return res if sequential else res[-1]
| 26.037037
| 113
| 0.698435
|
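As an aside, a hedged usage sketch of linearreg on synthetic candles; the column order below (timestamp, open, close, high, low, volume) is assumed to match Jesse's candle layout, and TA-Lib must be installed:
import numpy as np
from jesse.indicators import linearreg

# Synthetic candles; columns assumed to be
# [timestamp, open, close, high, low, volume].
n = 100
closes = np.cumsum(np.random.randn(n)) + 100
candles = np.column_stack([
    np.arange(n) * 60_000,   # timestamps (ms)
    closes + 0.1,            # open
    closes,                  # close
    closes + 0.5,            # high
    closes - 0.5,            # low
    np.full(n, 1_000.0),     # volume
])

print(linearreg(candles, period=14))                   # last value only
print(linearreg(candles, period=14, sequential=True))  # full series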
a867d5aca4d591c84ebfc2adfb4cbf261a607ebb
| 20,284
|
py
|
Python
|
jina/peapods/runtimes/zmq/zed.py
|
vishalbelsare/jina
|
ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43
|
[
"Apache-2.0"
] | 3
|
2021-12-06T08:10:02.000Z
|
2021-12-06T14:50:11.000Z
|
jina/peapods/runtimes/zmq/zed.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 1
|
2021-09-16T12:12:29.000Z
|
2021-09-16T12:12:29.000Z
|
jina/peapods/runtimes/zmq/zed.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 1
|
2021-11-15T05:51:07.000Z
|
2021-11-15T05:51:07.000Z
|
import argparse
import time
import threading
from collections import defaultdict
from typing import Dict, List, Optional, Union, TYPE_CHECKING
import zmq
import signal
from .... import __windows__
from ..base import BaseRuntime
from ..request_handlers.data_request_handler import DataRequestHandler
from ...zmq import ZmqStreamlet
from ....enums import OnErrorStrategy, SocketType
from ....excepts import (
NoExplicitMessage,
MemoryOverHighWatermark,
ChainedPodException,
RuntimeTerminated,
UnknownControlCommand,
)
from ....helper import random_identity
from ....logging.profile import used_memory
from ....proto import jina_pb2
from ....types.message import Message
from ....types.routing.table import RoutingTable
from ....importer import ImportExtensions
if TYPE_CHECKING:
import multiprocessing
from ....logging.logger import JinaLogger
class ZEDRuntime(BaseRuntime):
"""Runtime procedure leveraging :class:`ZmqStreamlet` for Executor."""
def __init__(self, args: 'argparse.Namespace', **kwargs):
"""Initialize private parameters and execute private loading functions.
:param args: args from CLI
:param kwargs: extra keyword arguments
"""
super().__init__(args, **kwargs)
if not __windows__:
try:
signal.signal(signal.SIGTERM, self._handle_sig_term)
except ValueError:
self.logger.warning(
'Runtime is being run in a thread. Threads can not receive signals and may not shutdown as expected.'
)
else:
with ImportExtensions(
required=True,
logger=self.logger,
help_text='''If you see a 'DLL load failed' error, please reinstall `pywin32`.
If you're using conda, please use the command `conda install -c anaconda pywin32`''',
):
import win32api
win32api.SetConsoleCtrlHandler(self._handle_sig_term)
self._id = random_identity()
self._last_active_time = time.perf_counter()
self.ctrl_addr = self.get_control_address(args.host, args.port_ctrl)
# all pending messages collected so far, key is the request id
self._pending_msgs = defaultdict(list) # type: Dict[str, List['Message']]
# idle_dealer_ids only becomes non-None when it receives IDLE ControlRequest
self._idle_dealer_ids = set()
self._data_request_handler = DataRequestHandler(self.args, self.logger)
self._static_routing_table = args.static_routing_table
self._load_zmqstreamlet()
def run_forever(self):
"""Start the `ZmqStreamlet`."""
self._zmqstreamlet.start(self._msg_callback)
def _handle_sig_term(self, *args):
self.teardown()
def teardown(self):
"""Close the `ZmqStreamlet` and `Executor`."""
self._zmqstreamlet.close()
self._data_request_handler.close()
super().teardown()
#: Private methods required by :meth:`setup`
def _load_zmqstreamlet(self):
"""Load ZMQStreamlet to this runtime."""
        # important: fix zmqstreamlet ctrl address to replace the ctrl address generated in the main
# process/thread
self._zmqstreamlet = ZmqStreamlet(
args=self.args,
logger=self.logger,
ctrl_addr=self.ctrl_addr,
)
#: Private methods required by :meth:`teardown`
def _check_memory_watermark(self):
"""Check the memory watermark."""
if used_memory() > self.args.memory_hwm > 0:
raise MemoryOverHighWatermark
#: Private methods required by run_forever
def _pre_hook(self, msg: 'Message') -> 'Message':
"""
Pre-hook function, what to do after first receiving the message.
:param msg: received message
:return: `ZEDRuntime`
"""
msg.add_route(self.name, self._id)
expected_parts = self._expect_parts(msg)
req_id = msg.envelope.request_id
if expected_parts > 1:
self._pending_msgs[req_id].append(msg)
num_partial_requests = len(self._pending_msgs[req_id])
if self.logger.debug_enabled:
self._log_info_msg(
msg,
f'({num_partial_requests}/{expected_parts} parts)'
if expected_parts > 1
else '',
)
if expected_parts > 1 and expected_parts > num_partial_requests:
# NOTE: reduce priority is higher than chain exception
# otherwise a reducer will lose its function when earlier pods raise exception
raise NoExplicitMessage
if msg.envelope.request_type == 'ControlRequest':
self._handle_control_req(msg)
if (
msg.envelope.status.code == jina_pb2.StatusProto.ERROR
and self.args.on_error_strategy >= OnErrorStrategy.SKIP_HANDLE
):
raise ChainedPodException
return msg
def _log_info_msg(self, msg, part_str):
info_msg = f'recv {msg.envelope.request_type} '
req_type = msg.envelope.request_type
if req_type == 'DataRequest':
info_msg += (
f'({msg.envelope.header.exec_endpoint}) - ({msg.envelope.request_id}) '
)
elif req_type == 'ControlRequest':
info_msg += f'({msg.request.command}) '
info_msg += f'{part_str} from {msg.colored_route}'
self.logger.debug(info_msg)
def _post_hook(self, msg: 'Message') -> 'Message':
"""
Post-hook function, what to do before handing out the message.
:param msg: the transformed message
:return: `ZEDRuntime`
"""
# do NOT access `msg.request.*` in the _pre_hook, as it will trigger the deserialization
# all meta information should be stored and accessed via `msg.envelope`
self._last_active_time = time.perf_counter()
self._check_memory_watermark()
if self._expect_parts(msg) > 1:
msgs = self._pending_msgs.pop(msg.envelope.request_id)
msg.merge_envelope_from(msgs)
msg.update_timestamp()
return msg
@staticmethod
def _parse_params(parameters: Dict, executor_name: str):
parsed_params = parameters
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
def _handle(self, msg: 'Message') -> 'Message':
"""Register the current message to this pea, so that all message-related properties are up-to-date, including
:attr:`request`, :attr:`prev_requests`, :attr:`message`, :attr:`prev_messages`. And then call the executor to handle
this message if its envelope's status is not ERROR, else skip handling of message.
.. note::
            Handle does not handle the message explicitly because it may wait for different messages when different parts are expected
:param msg: received message
:return: the transformed message.
"""
# skip executor for non-DataRequest
if msg.envelope.request_type != 'DataRequest':
self.logger.debug(f'skip executor: not data request')
return msg
# migrated from the previously RouteDriver logic
# set dealer id
if self._idle_dealer_ids:
dealer_id = self._idle_dealer_ids.pop()
msg.envelope.receiver_id = dealer_id
# when no available dealer, pause the pollin from upstream
if not self._idle_dealer_ids:
self._pause_pollin()
self.logger.debug(
f'using route, set receiver_id: {msg.envelope.receiver_id}'
)
req_id = msg.envelope.request_id
num_expected_parts = self._expect_parts(msg)
self._data_request_handler.handle(
msg=msg,
partial_requests=[m.request for m in self._pending_msgs[req_id]]
if num_expected_parts > 1
else None,
peapod_name=self.name,
)
return msg
def _pause_pollin(self):
self.logger.debug('No idle dealers available, pause pollin')
self._zmqstreamlet.pause_pollin()
def _handle_control_req(self, msg: 'Message'):
# migrated from previous ControlDriver logic
if msg.request.command == 'TERMINATE':
msg.envelope.status.code = jina_pb2.StatusProto.SUCCESS
raise RuntimeTerminated
elif msg.request.command == 'STATUS':
msg.envelope.status.code = jina_pb2.StatusProto.READY
msg.request.parameters = vars(self.args)
elif msg.request.command == 'IDLE':
self._idle_dealer_ids.add(msg.envelope.receiver_id)
self._zmqstreamlet.resume_pollin()
self.logger.debug(
f'{msg.envelope.receiver_id} is idle, now I know these idle peas {self._idle_dealer_ids}'
)
elif msg.request.command == 'CANCEL':
if msg.envelope.receiver_id in self._idle_dealer_ids:
self.logger.debug(
f'Removing idle dealer {msg.envelope.receiver_id}, now I know these idle peas {self._idle_dealer_ids}'
)
self._idle_dealer_ids.remove(msg.envelope.receiver_id)
# when no available dealer, pause the pollin from upstream
if not self._idle_dealer_ids:
self._pause_pollin()
elif msg.request.command == 'ACTIVATE':
self._zmqstreamlet._send_idle_to_router()
elif msg.request.command == 'DEACTIVATE':
self._zmqstreamlet._send_cancel_to_router()
else:
raise UnknownControlCommand(
f'don\'t know how to handle {msg.request.command}'
)
def _callback(self, msg: 'Message'):
self.is_post_hook_done = False #: if the post_hook is called
msg = self._post_hook(self._handle(self._pre_hook(msg)))
self.is_post_hook_done = True
return msg
def _msg_callback(self, msg: 'Message') -> None:
"""
Callback function after receiving the message
When nothing is returned then nothing is send out via :attr:`zmqlet.sock_out`.
:param msg: received message
"""
try:
# notice how executor related exceptions are handled here
# generally unless executor throws an OSError, the exception are caught and solved inplace
processed_msg = self._callback(msg)
            # don't send responses for CANCEL and IDLE control requests
if msg.is_data_request or msg.request.command not in ['CANCEL', 'IDLE']:
self._zmqstreamlet.send_message(processed_msg)
except RuntimeTerminated:
# this is the proper way to end when a terminate signal is sent
self._zmqstreamlet.send_message(msg)
self._zmqstreamlet.close()
except KeyboardInterrupt as kbex:
# save executor
self.logger.debug(f'{kbex!r} causes the breaking from the event loop')
self._zmqstreamlet.send_message(msg)
self._zmqstreamlet.close(flush=False)
except (SystemError, zmq.error.ZMQError) as ex:
# save executor
self.logger.debug(f'{ex!r} causes the breaking from the event loop')
self._zmqstreamlet.send_message(msg)
self._zmqstreamlet.close()
except MemoryOverHighWatermark:
self.logger.critical(
f'memory usage {used_memory()} GB is above the high-watermark: {self.args.memory_hwm} GB'
)
except NoExplicitMessage:
# silent and do not propagate message anymore
# 1. wait partial message to be finished
# 2. dealer send a control message and no need to go on
pass
except (RuntimeError, Exception, ChainedPodException) as ex:
# general runtime error and nothing serious, we simply mark the message to error and pass on
if not self.is_post_hook_done:
self._post_hook(msg)
if self.args.on_error_strategy == OnErrorStrategy.THROW_EARLY:
raise
if isinstance(ex, ChainedPodException):
# the error is print from previous pod, no need to show it again
# hence just add exception and propagate further
# please do NOT add logger.error here!
msg.add_exception()
else:
msg.add_exception(ex, executor=self._data_request_handler._executor)
self.logger.error(
                f'{ex!r}'
                + (
                    '\n add "--quiet-error" to suppress the exception details'
                    if not self.args.quiet_error
                    else ''
                ),
exc_info=not self.args.quiet_error,
)
self._zmqstreamlet.send_message(msg)
#: Some class-specific properties
@property
def is_idle(self) -> bool:
"""
        Return ``True`` when the current time is ``max_idle_time`` seconds later than the last active time
:return: True if idle else false.
"""
return (time.perf_counter() - self._last_active_time) > self.args.max_idle_time
def _expect_parts(self, msg: 'Message') -> int:
"""
The expected number of partial messages before trigger :meth:`handle`
:param msg: The message from which to compute the expected parts
:return: expected number of partial messages
"""
if msg.is_data_request:
if (
self.args.socket_in == SocketType.ROUTER_BIND
and not self._static_routing_table
):
graph = RoutingTable(msg.envelope.routing_table)
return graph.active_target_pod.expected_parts
else:
return self.args.num_part
else:
return 1
# Static methods used by the Pea to communicate with the `Runtime` in the separate process
@staticmethod
def status(ctrl_address: str, timeout_ctrl: int):
"""
Send get status control message.
:param ctrl_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:return: control message.
"""
from ...zmq import send_ctrl_message
return send_ctrl_message(
ctrl_address, 'STATUS', timeout=timeout_ctrl, raise_exception=False
)
@staticmethod
def is_ready(ctrl_address: str, timeout_ctrl: int) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:return: True if status is ready else False.
"""
status = ZEDRuntime.status(ctrl_address, timeout_ctrl)
return status and status.is_ready
@staticmethod
def wait_for_ready_or_shutdown(
timeout: Optional[float],
ctrl_address: str,
timeout_ctrl: int,
shutdown_event: Union['multiprocessing.Event', 'threading.Event'],
**kwargs,
):
"""
        Wait until the runtime reports ready or the shutdown event is set
:param timeout: The time to wait before readiness or failure is determined
:param ctrl_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:param shutdown_event: the multiprocessing event to detect if the process failed
:param kwargs: extra keyword arguments
        :return: True if it is ready or it needs to be shut down
"""
timeout_ns = 1e9 * timeout if timeout else None
now = time.time_ns()
while timeout_ns is None or time.time_ns() - now < timeout_ns:
if shutdown_event.is_set() or ZEDRuntime.is_ready(
ctrl_address, timeout_ctrl
):
return True
return False
@staticmethod
def _retry_control_message(
ctrl_address: str,
timeout_ctrl: int,
command: str,
num_retry: int,
logger: 'JinaLogger',
):
"""Retry sending a control message with a given command for several trials
:param ctrl_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:param command: the command to send in the control message
:param num_retry: the number of retries to successfully send the message
:param logger: the JinaLogger to log messages
"""
from ...zmq import send_ctrl_message
for retry in range(1, num_retry + 1):
logger.debug(f'Sending {command} command for the {retry}th time')
try:
send_ctrl_message(
ctrl_address,
command,
timeout=timeout_ctrl,
raise_exception=True,
)
break
except Exception as ex:
logger.warning(f'{ex!r}')
if retry == num_retry:
raise ex
@staticmethod
def cancel(
control_address: str,
timeout_ctrl: int,
socket_in_type: 'SocketType',
skip_deactivate: bool,
logger: 'JinaLogger',
**kwargs,
):
"""
        Send the control messages needed to cancel the runtime (DEACTIVATE if applicable, then TERMINATE)
:param control_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:param socket_in_type: the type of input socket, needed to know if is a dealer
:param skip_deactivate: flag to tell if deactivate signal may be missed.
This is important when you want to independently kill a Runtime
:param logger: the JinaLogger to log messages
:param kwargs: extra keyword arguments
"""
if not skip_deactivate and socket_in_type == SocketType.DEALER_CONNECT:
ZEDRuntime._retry_control_message(
ctrl_address=control_address,
timeout_ctrl=timeout_ctrl,
command='DEACTIVATE',
num_retry=3,
logger=logger,
)
ZEDRuntime._retry_control_message(
ctrl_address=control_address,
timeout_ctrl=timeout_ctrl,
command='TERMINATE',
num_retry=3,
logger=logger,
)
@staticmethod
def activate(
control_address: str,
timeout_ctrl: int,
socket_in_type: 'SocketType',
logger: 'JinaLogger',
**kwargs,
):
"""
        Send the ACTIVATE control message to the runtime if its input socket is a dealer
:param control_address: the address where the control message needs to be sent
:param timeout_ctrl: the timeout to wait for control messages to be processed
:param socket_in_type: the type of input socket, needed to know if is a dealer
:param logger: the JinaLogger to log messages
:param kwargs: extra keyword arguments
"""
if socket_in_type == SocketType.DEALER_CONNECT:
ZEDRuntime._retry_control_message(
ctrl_address=control_address,
timeout_ctrl=timeout_ctrl,
command='ACTIVATE',
num_retry=3,
logger=logger,
)
@staticmethod
def get_control_address(host: str, port: str, **kwargs):
"""
Get the control address for a runtime with a given host and port
:param host: the host where the runtime works
:param port: the control port where the runtime listens
:param kwargs: extra keyword arguments
:return: The corresponding control address
"""
from ...zmq import Zmqlet
return Zmqlet.get_ctrl_address(host, port, False)[0]
| 38.12782
| 130
| 0.623151
|
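As an aside, the retry loop used by _retry_control_message above, reduced to a generic helper; illustrative only, not jina API, and the small back-off is an addition not present in the original:
import time

def retry(send, num_retry=3, log=print):
    # Call `send` up to num_retry times, logging failures and re-raising the last one.
    for attempt in range(1, num_retry + 1):
        try:
            return send()
        except Exception as ex:
            log(f'attempt {attempt} failed: {ex!r}')
            if attempt == num_retry:
                raise
            time.sleep(0.1)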
0208350d1dd3e15f7436e96d468faddd0c98e1bf
| 215
|
py
|
Python
|
cms/test_utils/project/placeholderapp/urls_multi.py
|
stefanfoulis/django-cms
|
5af564879b4222aa00a8ee27fbb7d1b3ae0cbde5
|
[
"BSD-3-Clause"
] | 1
|
2019-11-26T04:47:18.000Z
|
2019-11-26T04:47:18.000Z
|
cms/test_utils/project/placeholderapp/urls_multi.py
|
stefanfoulis/django-cms
|
5af564879b4222aa00a8ee27fbb7d1b3ae0cbde5
|
[
"BSD-3-Clause"
] | 6
|
2015-12-02T16:10:20.000Z
|
2016-06-17T14:24:00.000Z
|
cms/test_utils/project/placeholderapp/urls_multi.py
|
stefanfoulis/django-cms
|
5af564879b4222aa00a8ee27fbb7d1b3ae0cbde5
|
[
"BSD-3-Clause"
] | 1
|
2017-10-17T08:20:32.000Z
|
2017-10-17T08:20:32.000Z
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^detail/(?P<pk>[0-9]+)/$', views.detail_view_multi, name="detail_multi"),
url(r'^$', views.list_view_multi, name="list_multi"),
]
| 23.888889
| 83
| 0.669767
|
3607ffc518d4a3994f32590f52cd4c00189f3a9e
| 4,153
|
py
|
Python
|
lte/gateway/python/magma/kernsnoopd/snooper.py
|
electrocucaracha/magma
|
4beadd7ac75616976443f2f76b573cf2aefd73a9
|
[
"BSD-3-Clause"
] | 2
|
2020-12-01T02:32:22.000Z
|
2020-12-27T19:13:48.000Z
|
lte/gateway/python/magma/kernsnoopd/snooper.py
|
markjen/magma
|
c7c7dc2b8714f53b1153e620bbfc002d5009de34
|
[
"BSD-3-Clause"
] | 181
|
2020-02-03T15:17:12.000Z
|
2021-10-06T20:13:29.000Z
|
lte/gateway/python/magma/kernsnoopd/snooper.py
|
markjen/magma
|
c7c7dc2b8714f53b1153e620bbfc002d5009de34
|
[
"BSD-3-Clause"
] | 3
|
2021-11-04T17:33:36.000Z
|
2021-12-24T06:50:36.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
from bcc import BPF
from jinja2 import Template
from magma.common.job import Job
from magma.kernsnoopd.handlers import ebpf_handlers
EBPF_SRC_DIR = "/etc/magma/ebpf"
if not os.path.isdir(EBPF_SRC_DIR):
EBPF_SRC_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'ebpf',
)
EBPF_COMMON_FILE = 'common.bpf.h'
def _get_ebpf_source(filename, context) -> str:
"""
_get_ebpf_source reads template source from file and renders it with
context parameters
Args:
filename: absolute path of file from which to read template source
context: dict containing parameter values
Returns:
Rendered source contents
"""
with open(filename, 'r') as src_f:
src = src_f.read()
template = Template(src)
return template.render(context)
class NoSourcesFoundError(Exception):
"""
NoSourcesFoundError is thrown when Snooper does not find any eBPF programs
or source files to load into the kernel
"""
class Snooper(Job):
"""
    Snooper is a Job that compiles and loads eBPF programs, registers relevant
    front-end programs as handlers, and periodically calls their handle methods
"""
def __init__(
self, programs: list, collect_interval: int,
service_registry, service_loop,
):
super().__init__(interval=collect_interval, loop=service_loop)
self._bpf = None
self._handlers = []
self._loop = service_loop
self._ebpf_programs = programs
self._service_registry = service_registry
self._context = {
'PROXY_PORT': service_registry.get_proxy_config().get(
'local_port',
),
}
try:
self._load_ebpf_programs()
self.start()
except NoSourcesFoundError:
logging.error('Fatal: no eBPF sources loaded')
def _load_ebpf_programs(self) -> None:
"""
_load_ebpf_programs reads eBPF templates from _ebpf_programs, renders
them with context, compiles and loads them into kernel, and registers
corresponding front-end handlers
Raises:
NoSourcesFoundError: self._ebpf_programs was empty or no source in
self._ebpf_programs could be loaded
"""
if not self._ebpf_programs:
raise NoSourcesFoundError()
sources = []
for basename in self._ebpf_programs:
filename = os.path.join(EBPF_SRC_DIR, f'{basename}.bpf.c')
try:
sources.append(_get_ebpf_source(filename, self._context))
handler = ebpf_handlers[basename](self._service_registry)
self._handlers.append(handler)
except FileNotFoundError:
logging.error('Could not open eBPF source file %s' % filename)
except KeyError:
logging.error('Fatal: did not find handler for %s' % basename)
# found eBPF sources to load into kernel
if sources:
# find and prepend header
header = os.path.join(EBPF_SRC_DIR, EBPF_COMMON_FILE)
try:
sources.insert(0, _get_ebpf_source(header, self._context))
self._bpf = BPF(text='\n'.join(sources))
logging.info('Loaded sources into kernel')
except FileNotFoundError:
logging.error('Fatal: Could not open header file %s' % header)
else:
raise NoSourcesFoundError()
async def _run(self) -> None:
if self._bpf is not None:
for handler in self._handlers:
handler.handle(self._bpf)
| 33.224
| 79
| 0.649892
|
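As an aside, the same Jinja2 rendering step that _get_ebpf_source performs, shown on an inline template; the PROXY_PORT value is a placeholder:
from jinja2 import Template

src = "int proxy_port = {{ PROXY_PORT }};"
print(Template(src).render({'PROXY_PORT': 3128}))  # -> int proxy_port = 3128;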
1d511bd2ae9f829de5c9b30f7a98ead8c6934a86
| 4,047
|
py
|
Python
|
msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contacts_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contacts_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersfunctions_v1_0/azext_usersfunctions_v1_0/vendored_sdks/usersfunctions/aio/operations/_users_contacts_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsersContactsOperations:
"""UsersContactsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_functions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def delta(
self,
user_id: str,
**kwargs
) -> List["models.MicrosoftGraphContact"]:
"""Invoke function delta.
Invoke function delta.
:param user_id: key: id of user.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphContact, or the result of cls(response)
:rtype: list[~users_functions.models.MicrosoftGraphContact]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphContact"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphContact]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/users/{user-id}/contacts/microsoft.graph.delta()'} # type: ignore
| 42.15625
| 133
| 0.672597
|
f5f0a1b5af1db0d45701d5cd77f11a53f88e5996
| 621
|
py
|
Python
|
main.py
|
CodeWithAgam/Is-it-Leap-Year
|
fa88c79d16d010e49a9d6db2d0ffbdcd0732b82c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
CodeWithAgam/Is-it-Leap-Year
|
fa88c79d16d010e49a9d6db2d0ffbdcd0732b82c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
CodeWithAgam/Is-it-Leap-Year
|
fa88c79d16d010e49a9d6db2d0ffbdcd0732b82c
|
[
"Apache-2.0"
] | null | null | null |
# Created by Agamdeep Singh / CodeWithAgam
# Youtube: CodeWithAgam
# Github: CodeWithAgam
# Instagram: @coderagam001 / @codewithagam
# Twitter: @CoderAgam001
# LinkedIn: Agamdeep Singh
# Print a welcome message
print("Welcome! Let's check whether a year is a leap year.")
# Get the year from the user
year = int(input("Which year do you want to check? "))
# Check for the conditions
# The modulo operator (%) gives the remainder after dividing one number by another.
if year % 4 == 0:
if year % 100 == 0:
if year % 400 == 0:
print("Leap year.")
else:
print("Not leap year.")
else:
print("Leap year.")
else:
print("Not leap year.")
| 24.84
| 71
| 0.636071
|
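As an aside, the same rule folded into a single reusable function (illustrative only):
def is_leap_year(year: int) -> bool:
    # Divisible by 4, except centuries, unless divisible by 400.
    if year % 4 != 0:
        return False
    if year % 100 != 0:
        return True
    return year % 400 == 0

assert is_leap_year(2000) and is_leap_year(2024)
assert not is_leap_year(1900) and not is_leap_year(2023)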
2ea4d72a363da00ce175b2807b34d837a5d56d1c
| 1,595
|
py
|
Python
|
launcher/launch.py
|
awesome-archive/byteps
|
8fe4f5e62eaf539f214f3aa68b99cb239eedf66b
|
[
"Apache-2.0"
] | 1
|
2019-10-28T07:36:52.000Z
|
2019-10-28T07:36:52.000Z
|
launcher/launch.py
|
awesome-archive/byteps
|
8fe4f5e62eaf539f214f3aa68b99cb239eedf66b
|
[
"Apache-2.0"
] | null | null | null |
launcher/launch.py
|
awesome-archive/byteps
|
8fe4f5e62eaf539f214f3aa68b99cb239eedf66b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import subprocess
import threading
import sys
import time
def worker(local_rank, local_size, command):
my_env = os.environ.copy()
my_env["BYTEPS_LOCAL_RANK"] = str(local_rank)
my_env["BYTEPS_LOCAL_SIZE"] = str(local_size)
if os.getenv("BYTEPS_ENABLE_GDB", 0):
if command.find("python") != 0:
command = "python " + command
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
if __name__ == "__main__":
print "BytePS launching " + os.environ["DMLC_ROLE"]
sys.stdout.flush()
if os.environ["DMLC_ROLE"] == "worker":
if "NVIDIA_VISIBLE_DEVICES" in os.environ:
local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(","))
else:
local_size = 1
t = [None] * local_size
for i in range(local_size):
command = ' '.join(sys.argv[1:])
t[i] = threading.Thread(target=worker, args=[i, local_size, command])
t[i].daemon = True
t[i].start()
for i in range(local_size):
t[i].join()
else:
if "BYTEPS_SERVER_MXNET_PATH" not in os.environ:
print "BYTEPS_SERVER_MXNET_PATH env not set"
os._exit(0)
sys.path.insert(0, os.getenv("BYTEPS_SERVER_MXNET_PATH")+"/python")
import mxnet
print "BytePS Server MXNet version: " + mxnet.__version__
# TODO: terminates when workers quit
while True:
time.sleep(3600)
| 32.55102
| 96
| 0.608777
|
aa7482f14dc7e8c56bf30d3ea8c3c6b5723a4b16
| 19,454
|
py
|
Python
|
fastdoc/asciidoc.py
|
isabella232/fastdoc
|
424f44491b16e9e701de26570e403333f04960db
|
[
"Apache-2.0"
] | null | null | null |
fastdoc/asciidoc.py
|
isabella232/fastdoc
|
424f44491b16e9e701de26570e403333f04960db
|
[
"Apache-2.0"
] | 1
|
2021-02-23T22:51:48.000Z
|
2021-02-23T22:51:48.000Z
|
fastdoc/asciidoc.py
|
isabella232/fastdoc
|
424f44491b16e9e701de26570e403333f04960db
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_asciidoc.ipynb (unless otherwise specified).
__all__ = ['markdown_cell', 'code_cell', 'remove_hidden_cells', 'isolate_adoc_blocks', 'replace_old_jekylls',
'hide_input', 'hide_output', 'extract_html', 'split_max_len', 'deal_error', 'remove_interrupted_pbars',
'get_cell_meta', 'caption_tables', 'TEXT_MAX_WIDTH', 'wrap_text_outputs', 'CODE_MAX_LEN', 'check_code_len',
'deal_quotes', 'add_title_level', 'deal_with_lists', 'replace_jekylls', 'interpret_sidebar',
'IMAGE_CONV_MULT', 'process_images', 'wrap_references', 'extract_attachments', 'sidebar_headers',
'code_cell_tfms', 'md_cell_tfms', 'add_new_line', 'treat_notebook', 'rep_spec_tok', 'ipython2python',
'remove_cells', 'clear_cells', 'format_latex', 'format_outputs', 'fix_quotes', 'fix_references',
'format_tables', 'remove_lines', 'post_process_tfms', 'post_process', 'c', 'exporter', 'add_metadata',
'output_num', 'IMAGE_OUT_MULT', 'get_output_width', 'convert_nb', 'copy_images', 'fastdoc_convert_all']
# Cell
from .imports import *
from fastcore.script import *
from warnings import warn
# Cell
def markdown_cell(md):
return nbformat.notebooknode.NotebookNode({'cell_type': 'markdown', 'source': md, 'metadata': {}})
# Cell
def code_cell(code, metadata=None, outputs=None):
return nbformat.notebooknode.NotebookNode(
{'cell_type': 'code',
'execution_count': None,
'source': code,
'metadata': {} if metadata is None else metadata,
'outputs': [] if outputs is None else outputs})
# Cell
_re_hidden = re.compile(r'^\s*#\s*(hide|clean)\s*$', re.MULTILINE)
# Cell
def remove_hidden_cells(cells):
"Remove cells marked with #hide"
return [c for c in cells if _re_hidden.search(c['source']) is None]
# Cell
def isolate_adoc_blocks(cells):
res = []
for cell in cells:
if cell['cell_type'] == 'markdown' and re.search(r'```\s*asciidoc', cell['source']) is not None:
lines = cell['source'].split('\n')
adoc,s,idx = False,0,0
for line in lines:
if re.search(r'^```\s*asciidoc\s*$', line) is not None and not adoc:
res.append(markdown_cell('\n'.join(lines[s:idx])))
adoc,s = True,idx+1
elif re.search(r'^```\s*$', line) is not None and adoc:
res.append(code_cell('##clear##' + '\n'.join(lines[s:idx])))
adoc,s = False,idx+1
idx+=1
assert not adoc, f"Triple-quote asciidoc block not ended in {cell['source']}"
res.append(markdown_cell('\n'.join(lines[s:])))
else: res.append(cell)
return res
# Cell
#TODO: remove when all notebooks have been ported to v2
def replace_old_jekylls(cell):
if cell['source'].startswith('jekyll'):
pat1 = re.compile(r"""jekyll_(.*)\(['"].*""")
pat2 = re.compile(r"""jekyll_.*\(['"]+([\s\S]*[^'"])['"]+\)$""")
jekyll_type = re.match(pat1, cell['source']).groups()[0]
message = re.match(pat2, cell['source']).groups()[0]
inst = {'warn':'WARNING', 'note':'NOTE', 'important':'TIP'}
cell['metadata'] = {}
cell['source'] = f'##clear##[{inst[jekyll_type]}]\n====\n{message}\n===='
cell['outputs'] = []
return cell
# Cell
_re_hide_input = re.compile(r'^\s*#\s*hide_input\s*$', re.MULTILINE)
# Cell
def hide_input(cell):
if cell['metadata'].get('hide_input', False) or _re_hide_input.search(cell["source"]) is not None: cell['source'] = '##remove##'
return cell
# Cell
_re_hide_output = re.compile(r'^\s*#\s*hide_output\s*$', re.MULTILINE)
# Cell
def hide_output(cell):
if cell['metadata'].get('collapsed', False) or _re_hide_output.search(cell["source"]) is not None:
cell['outputs'] = []
cell['source'] = re.sub(r'#\s*hide_output\s*\n', '', cell['source'])
return cell
# Cell
def extract_html(cell):
for o in cell['outputs']:
if 'data' in o and 'text/html' in o['data']:
o['data']['text/plain'] = o['data']['text/html']
del o['data']['text/html']
return cell
# Cell
def split_max_len(text, l):
words = text.split(' ')
line,lines = "",[]
for word in words:
if len(line) + len(word) + 1 <= l: line += f' {word}'
else:
lines.append(line)
line = ""
if len(line) > 0: lines.append(line)
return "\n".join(lines)
# Cell
def deal_error(cell):
for i,out in enumerate(cell['outputs']):
if out['output_type'] == 'error':
msg = f"{out['ename']}: {out['evalue']}"
cell['outputs'][i] = nbformat.notebooknode.NotebookNode({
'data': {'text/plain': split_max_len(msg, 81) },
'execution_count': None,
'metadata': {},
'output_type': 'execute_result'})
return cell
# Cell
def remove_interrupted_pbars(cell):
outs = []
for out in cell['outputs']:
if 'data' not in out or 'text/plain' not in out['data'] or 'progress-bar-interrupted' not in out['data']['text/plain']:
outs.append(out)
cell['outputs'] = outs
return cell
# Cell
def get_cell_meta(cell):
for attr in ["id", "caption", "alt", "width"]:
if re.search(r'^\s*#\s*' + attr + r'\s(.*)$', cell["source"], re.MULTILINE) is not None:
cell["metadata"][attr] = re.search(r'^\s*#\s*' + attr + r'\s(.*)$', cell["source"], re.MULTILINE).groups()[0]
cell["source"] = re.sub(r'#\s*' + attr + r'\s.*?($|\n)', '', cell["source"])
return cell
# Cell
def caption_tables(cell):
if 'outputs' not in cell or len(cell['outputs']) == 0: return cell
output = cell['outputs'][0]
if 'data' not in output or 'text/plain' not in output['data']: return cell
text = output['data']['text/plain']
if re.search(r'^<\s*table\s+([^>]*>)', text) is None: return cell
table_id = cell['metadata'].get('id', None)
caption = cell['metadata'].get('caption', None)
text_id = '' if table_id is None else f'id="{table_id}" '
text_caption = '' if caption is None else f'\n <caption>{caption}</caption>'
output['data']['text/plain'] = re.sub(r'^<\s*table\s+([^>]*>)', '<table '+text_id+r'\1'+text_caption, text)
cell['outputs'][0] = output
return cell
# Cell
TEXT_MAX_WIDTH = 80
# Cell
def _wrap_output(output):
if 'text' in output:
lines = ['\n'.join(textwrap.wrap(l, width=TEXT_MAX_WIDTH, subsequent_indent = ' > ')) for l in output['text'].split('\n')]
output['text'] = '\n'.join(lines)
return output
if ('data' not in output or 'text/plain' not in output['data']): return output
text = output['data']['text/plain']
if re.search(r'^<\s*table\s*([^>]*>)', text) is not None: return output
lines = ['\n'.join(textwrap.wrap(l, width=TEXT_MAX_WIDTH, subsequent_indent = ' > ')) for l in text.split('\n')]
output['data']['text/plain'] = '\n'.join(lines)
return output
# Cell
def wrap_text_outputs(cell):
if 'outputs' not in cell or len(cell['outputs']) == 0: return cell
cell['outputs'] = [_wrap_output(o) for o in cell['outputs']]
return cell
# Cell
CODE_MAX_LEN = 80
# Cell
def check_code_len(cell):
lines = cell['source'].split('\n')
for l in lines:
if len(l) > CODE_MAX_LEN: warn(f"Found code too long in a cell:\n{cell['source']}")
return cell
# Cell
def deal_quotes(cell):
cell['source'] = re.sub(r'"`([^`]*)`"', r'`\1`', cell['source'])
cell['source'] = re.sub(r"'", r'xxsinglequote', cell['source'])
return cell
# Cell
def add_title_level(cell):
if cell['source'].startswith('#'): cell['source'] = '#' + cell['source']
return cell
# Cell
def deal_with_lists(cell):
lines = cell['source'].split('\n')
for i in range(len(lines)):
lines[i] = re.sub(r'(^\s*)\d*\.(.*)$', r'\1.\2xxnewl', lines[i])
lines[i] = re.sub(r'(^\s*)-\s(.*::)\s(.*)$', r'\2xxnewls\3xxnewl', lines[i])
cell['source'] = '\n'.join(lines)
return cell
# Cell
_re_block_notes = re.compile(r"""
# Catches any pattern > Title: content with title in group 1 and content in group 2
^\s*>\s* # > followed by any number of whitespace
([^:]*) # Catching group for any character but :
:\s* # : then any number of whitespace
([^\n]*) # Catching group for anything but a new line character
(?:\n|$) # Non-catching group for either a new line or the end of the text
""", re.VERBOSE | re.MULTILINE)
_re_forgot_column = re.compile(r"^\s*>[^:]*$", re.MULTILINE)
# Cell
def replace_jekylls(cell):
block_names = {'warning':'WARNING', 'note':'NOTE', 'important':'TIP', 'tip': 'TIP', 'stop': 'WARNING',
'jargon':'JARGON', 'question':'QUESTION', 'a': 'ALEXIS', 'j': 'JEREMY', 's': 'SYLVAIN'}
def _rep(m):
typ,text = m.groups()
name = block_names.get(typ.lower(), typ.upper())
if name in ['ALEXIS', 'JEREMY', 'SYLVAIN', 'JARGON', 'QUESTION']:
title = name[0]+name[1:].lower()
surro = 'NOTE'
if name=='JARGON':
splits = text.split(': ')
title = f'{title}: {splits[0]}'
text = ': '.join(splits[1:])
if name in ['ALEXIS', 'JEREMY', 'SYLVAIN']:
title = f"{title} says"
surro = 'TIP'
return f'```asciidoc\n.{title}\n[{surro}]\n====\n{text}\n====\n```\n'
elif len(name) != 0: return f"```asciidoc\n[{name}]\n====\n{text}\n====\n```\n"
else: return f"```asciidoc\n____\n{text}\n____\n```\n"
if _re_forgot_column.search(cell["source"]): warn("Found a non-processed block quote, please fix")
cell["source"] = _re_block_notes.sub(_rep, cell["source"])
return cell
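# Illustrative example (not part of the library): a markdown cell containing
#   "> note: Remember to check the shape of your batch."
# is rewritten by replace_jekylls into an asciidoc block of the form
#   ```asciidoc
#   [NOTE]
#   ====
#   Remember to check the shape of your batch.
#   ====
#   ```
# while "> a: ..." becomes an "Alexis says" TIP block, per block_names above.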
# Cell
_re_sidebar = re.compile(r'^\s*#\s*sidebar\s(.*)$', re.MULTILINE)
# Cell
def interpret_sidebar(cell):
lines = cell["source"].split("\n")
if _re_sidebar.search(lines[0]) is not None:
title = _re_sidebar.search(lines[0]).groups()[0]
body = "\n".join(lines[1:])
cell["source"] = f"```asciidoc\n.{title}\n****\n{body}\n****\n```\n"
return cell
# Cell
_re_md_image = re.compile(r"^(<img\ [^>]*>)", re.MULTILINE)
# Cell
IMAGE_CONV_MULT = 0.6
# Cell
def process_images(cell):
h = HTMLParseAttrs()
def _rep(m):
d = h(m.groups()[0])
attrs = ['"' + d.get('alt', '') + '"']
if 'width' in d: attrs.append(str(int(IMAGE_CONV_MULT * int(d['width']))))
if 'width' in d and 'height' in d: attrs.append(str((int(IMAGE_CONV_MULT * int(d['height'])))))
suff = f"[{', '.join(attrs)}]"
pid = f"[[{d['id']}]]\n" if 'id' in d else ""
caption = f".{d['caption']}\n" if 'caption' in d else ""
return f"```asciidoc\n{pid}{caption}image::{d['src']}{suff}\n```"
cell["source"] = _re_md_image.sub(_rep, cell["source"])
return cell
# Cell
_re_reference = re.compile(r'<<([^>]*)>>')
# Cell
def wrap_references(cell):
cell["source"] = _re_reference.sub(r'xxref\1xxeref', cell["source"])
return cell
# Cell
def extract_attachments(cell, dest):
if not 'attachments' in cell: return cell
mime,img = first(first(cell['attachments'].values()).items())
ext = mime.split('/')[1]
for i in range(99999):
p = dest/(f'att_{i:05d}.{ext}')
if not p.exists(): break
p.write_bytes(b64decode(img))
del(cell['attachments'])
cell['source'] = re.sub('attachment:image.png', str(p), cell['source'])
return cell
# Cell
_re_sidebar_title = re.compile(r'#+\s+Sidebar:\s+(.*)$', re.IGNORECASE)
_re_end_sidebar = re.compile(r'#+\s+End sidebar', re.IGNORECASE)
# Cell
def sidebar_headers(cell):
cell['source'] = _re_sidebar_title.sub(r'```asciidoc\n.\1\n****\n```', cell['source'])
cell['source'] = _re_end_sidebar.sub(r'```asciidoc\n****\n```', cell['source'])
return cell
# Cell
code_cell_tfms = [get_cell_meta, replace_old_jekylls, hide_input, hide_output, extract_html, deal_error,
remove_interrupted_pbars, wrap_text_outputs, caption_tables, check_code_len]
md_cell_tfms = [deal_quotes, wrap_references, interpret_sidebar, sidebar_headers, add_title_level, deal_with_lists,
process_images, replace_jekylls]
# Cell
def add_new_line(cell):
cell['source'] = '\n' + cell['source']
return cell
# Cell
def treat_notebook(nb, dest):
nb['cells'] = remove_hidden_cells(nb['cells'])
tfm_func = {'code': compose(*code_cell_tfms), 'markdown': compose(partial(extract_attachments, dest=dest), *md_cell_tfms),
'raw': add_new_line}
nb['cells'] = [tfm_func[c['cell_type']](c) for c in nb['cells']]
nb['cells'] = isolate_adoc_blocks(nb['cells'])
return nb
# Cell
def rep_spec_tok(adoc, metadata=None):
adoc = re.sub('xxsinglequote', "'", adoc)
adoc = re.sub('xxnewls', '\n ', adoc)
    return re.sub(r'xxnewl\s', '\n', adoc)
# Cell
def ipython2python(adoc, metadata=None):
return re.sub(r'\[source, ipython3\]','[source, python]', adoc)
# Cell
def remove_cells(adoc, metadata=None):
adoc = re.sub(r'\n\[source, python\]\n----(\n)*----\n','', adoc)
return re.sub(r'\n\[source, python\]\n----\n##remove##\n----\n','', adoc)
# Cell
_re_clear = re.compile(r'\[source, python\]\n----\n##clear##(.*?)----\n', re.DOTALL)
def clear_cells(adoc, metadata=None): return _re_clear.sub(r'\1', adoc)
# Cell
def format_latex(adoc, metadata=None):
#LaTeX equations
adoc = re.sub(r"latexmath:\[\$([^\$]*)\$\]", r"latexmath:[\\(\1\\)]", adoc)
return re.sub(r"latexmath:\[\\\[(.*)\\\]\]", r"\n[latexmath]\n++++\n\\begin{equation}\n\1\n\\end{equation}\n++++\n", adoc)
# Cell
_re_image_output = re.compile(r'----\n!\[(?:svg|png|jpg)\]\((.+)\)\n----')
# Cell
def format_outputs(adoc, metadata=None):
folder = ({} if metadata is None else metadata).get('folder', '.')
def _rep(m):
name = m.groups()[0]
d = metadata[name] if metadata is not None and name in metadata else {}
attrs = ['"' + d.get('alt', '') + '"']
if 'width' in d: attrs.append(str(d['width']))
if 'width' in d and 'height' in d: attrs.append(str(d['height']))
suff = f"[{', '.join(attrs)}]"
pid = f"[[{d['id']}]]\n" if 'id' in d else ""
caption = f".{d['caption']}\n" if 'caption' in d else ""
return f"{pid}{caption}image::{str(folder)}/{name}{suff}"
return _re_image_output.sub(_rep, adoc)
# Cell
def fix_quotes(adoc, metadata=None):
return re.sub(r"``([^'`]*)''", r'"\1"', adoc)
# Cell
def fix_references(adoc, metadata=None): return re.sub(r"xxref(.*)xxeref", r"<<\1>>", adoc)
# Cell
def format_tables(adoc, metadata=None):
splits = adoc.split('----')
seps = [''] + ['----' for _ in range(len(splits)-1)] + ['']
for i,s in enumerate(splits):
s = re.sub(r'<div>[\s\S]*<table', '<table', s)
s = re.sub('</div>', '', s)
s = re.sub('<p>', '', s)
s = re.sub('</p>', '', s)
if len(s) > 0 and not s.startswith('\n'): s = '\n' + s
if len(s) > 0 and not s.endswith('\n'): s = s + '\n'
if s.startswith('\n<table'): seps[i],seps[i+1] = '++++','++++'
elif '<table' in s:
res = re.search('<table', s)
begin,end = res.span()
s = s[:begin] + '\n----\n\n++++\n' + s[begin:]
seps[i+1] = '++++'
splits[i] = s
res = ''
for s,c in zip(seps,splits): res = res + s + c
return res.replace('\n\n--------', '')
# Cell
def remove_lines(text, metadata=None):
return re.sub(r'\n\n\n\n+([^\n])', r'\n\n\n\1', text)
# Cell
post_process_tfms = [fix_quotes, rep_spec_tok, ipython2python, remove_cells, clear_cells, format_latex,
format_outputs, fix_references, format_tables, remove_lines]
# Cell
def post_process(adoc, metadata=None):
if not adoc.startswith('\n'): adoc = '\n' + adoc
    adoc = re.sub(r'xxnewl\s', '\n', adoc)
adoc = compose(*post_process_tfms)(adoc, metadata=metadata)
return adoc.strip()
# Cell
c = ExportConfig()
exporter = ASCIIDocExporter(c)
exporter.exclude_input_prompt=True
exporter.exclude_output_prompt=True
# Cell
def add_metadata(nb):
"Stripping removes metadata used in the conversion."
if 'language_info' not in nb['metadata']:
nb['metadata']['language_info'] = {
'codemirror_mode': {'name': 'ipython', 'version': 3},
'file_extension': '.py',
'mimetype': 'text/x-python',
'name': 'python',
'nbconvert_exporter': 'python',
'pygments_lexer': 'ipython3',
'version': '3.7.1'}
return nb
# Cell
def output_num(n):
m = re.search(r'^output_(\d*)_', n)
if m is None: return
return int(m.groups()[0])
# Cell
import PIL
# Cell
IMAGE_OUT_MULT = 0.8
# Cell
import xml.etree.ElementTree as ET
# Cell
def get_output_width(name, raw, folder):
if name.endswith('.svg'): return ET.fromstring(raw).attrib['width'].split('.')[0].replace('pt', '')
try: return PIL.Image.open(Path(folder)/name).size[0]
except: return None
# Cell
def convert_nb(fname, dest_path='.', folder=None):
"Convert a notebook `fname` to html file in `dest_path`."
print(f"Converting {fname}")
fname = Path(fname)
dest_name = fname.with_suffix('.asciidoc').name
if folder is None: folder = Path(dest_path)/f'{fname.stem}_files'
#folder for images. Clear if exists
if folder.exists(): shutil.rmtree(folder)
os.makedirs(folder, exist_ok=True)
nb = add_metadata(treat_notebook(read_nb(fname), folder))
export = exporter.from_notebook_node(nb)
metadata = {'folder': folder.relative_to(dest_path)}
metadata.update({n: nb["cells"][output_num(n)]['metadata'] for n in export[1]['outputs'].keys() if output_num(n) is not None})
for n,o in export[1]['outputs'].items():
with open(Path(folder)/n, 'wb') as f: f.write(o)
w = metadata[n]['width'] if 'width' in metadata[n] else get_output_width(n, o, folder)
if w is not None: metadata[n]['width'] = str(int(IMAGE_OUT_MULT * int(w)))
with open(f'{dest_path}/{dest_name}','w', encoding="utf8") as f:
f.write(post_process(export[0], metadata))
# Cell
def _copy_images(path, dest_path):
os.makedirs(dest_path, exist_ok=True)
for f in path.iterdir():
if f.is_file(): shutil.copy(f, dest_path/f.name)
if f.is_dir(): _copy_images(f, dest_path/f.name)
# Cell
def copy_images(path, dest_path):
img_folder = dest_path/"images"
if img_folder.exists(): shutil.rmtree(img_folder)
_copy_images(path/"images", img_folder)
# Cell
def _convert1(fname, dest_path='.'):
try: convert_nb(fname, dest_path=dest_path)
except Exception as e:
print(f"Error in notebook {fname}")
print(e)
# Cell
@call_parse
def fastdoc_convert_all(
path:Param("Path to notebooks",str)='book',
dest_path:Param("Path to generated asciidoc files",str)='../convert_book'):
path,dest_path = Path(path),Path(dest_path)
dest_path.mkdir(parents=True,exist_ok=True)
(path/'images').mkdir(parents=True,exist_ok=True)
nbs = [f for f in path.iterdir() if f.suffix == '.ipynb' and not f.name.startswith('_')]
parallel(_convert1, nbs, dest_path=dest_path)
for f in path.iterdir():
if f.suffix in ['.adoc', '.asciidoc']: shutil.copy(f, dest_path/f.name)
copy_images(path, dest_path)
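# Usage sketch (assuming the project registers this @call_parse entry point as a
# console script, as nbdev-style projects typically do):
#   fastdoc_convert_all --path book --dest_path ../convert_book
# converts every non-underscore .ipynb under book/ to asciidoc, copies .adoc and
# .asciidoc files as-is, and mirrors book/images into the destination.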
| 38.220039
| 132
| 0.595764
|
6b5b3cee13ff6babcba101a1f212a4d2a268d965
| 2,975
|
py
|
Python
|
tests/sentry/mediators/sentry_app_installations/test_destroyer.py
|
ibm5155/Sentry9.1_StableIthink
|
e3c6f22805b9aaaaa7ba41003946dd0e5f894340
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/mediators/sentry_app_installations/test_destroyer.py
|
ibm5155/Sentry9.1_StableIthink
|
e3c6f22805b9aaaaa7ba41003946dd0e5f894340
|
[
"BSD-3-Clause"
] | 1
|
2019-03-13T06:05:24.000Z
|
2019-03-13T06:05:24.000Z
|
tests/sentry/mediators/sentry_app_installations/test_destroyer.py
|
emiquelito/sentry
|
fa310029ec7e21293d632bb672a59877bc2e5531
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import responses
from django.db import connection
from sentry.mediators.sentry_app_installations import Creator, Destroyer
from sentry.models import ApiAuthorization, ApiGrant, SentryAppInstallation, ServiceHook
from sentry.testutils import TestCase
class TestDestroyer(TestCase):
@responses.activate
def setUp(self):
self.user = self.create_user()
self.org = self.create_organization()
self.project = self.create_project(organization=self.org)
responses.add(responses.POST, 'https://example.com/webhook')
self.sentry_app = self.create_sentry_app(
name='nulldb',
organization=self.org,
scopes=('project:read', 'event:read'),
events=('issue',),
)
self.install = Creator.run(
organization=self.org,
slug='nulldb',
user=self.user,
)
self.destroyer = Destroyer(
install=self.install,
user=self.user,
)
@responses.activate
def test_deletes_authorization(self):
auth = self.install.authorization
responses.add(responses.POST, 'https://example.com/webhook')
self.destroyer.call()
assert not ApiAuthorization.objects.filter(pk=auth.id).exists()
@responses.activate
def test_deletes_grant(self):
grant = self.install.api_grant
responses.add(responses.POST, 'https://example.com/webhook')
self.destroyer.call()
assert not ApiGrant.objects.filter(pk=grant.id).exists()
@responses.activate
def test_deletes_without_grant(self):
self.install.api_grant.delete()
self.install.update(api_grant=None)
responses.add(responses.POST, 'https://example.com/webhook')
assert self.destroyer.call()
@responses.activate
def test_deletes_service_hooks(self):
hook = self.create_service_hook(
application=self.sentry_app.application,
org=self.org,
project=self.project,
actor=self.install,
)
responses.add(responses.POST, 'https://example.com/webhook')
self.destroyer.call()
assert not ServiceHook.objects.filter(pk=hook.id).exists()
@responses.activate
def test_soft_deletes_installation(self):
responses.add(responses.POST, 'https://example.com/webhook')
self.destroyer.call()
with self.assertRaises(SentryAppInstallation.DoesNotExist):
SentryAppInstallation.objects.get(pk=self.install.id)
# The QuerySet will automatically NOT include deleted installs, so we
# use a raw sql query to ensure it still exists.
c = connection.cursor()
c.execute(
'SELECT COUNT(1) '
'FROM sentry_sentryappinstallation '
'WHERE id = %s AND date_deleted IS NOT NULL',
[self.install.id])
assert c.fetchone()[0] == 1
| 30.670103
| 88
| 0.647731
|
7d36bd5f616b28772e2c25eea81fadfdc8d6afdf
| 8,220
|
py
|
Python
|
culebratester_client/configuration.py
|
dtmilano/CulebraTester2-client
|
21979a851943c9a30c3b5f31eed21c1b1d4894dd
|
[
"Apache-2.0"
] | 7
|
2020-02-07T14:37:09.000Z
|
2022-03-11T09:54:47.000Z
|
culebratester_client/configuration.py
|
dtmilano/CulebraTester2-client
|
21979a851943c9a30c3b5f31eed21c1b1d4894dd
|
[
"Apache-2.0"
] | null | null | null |
culebratester_client/configuration.py
|
dtmilano/CulebraTester2-client
|
21979a851943c9a30c3b5f31eed21c1b1d4894dd
|
[
"Apache-2.0"
] | 1
|
2021-09-11T03:18:37.000Z
|
2021-09-11T03:18:37.000Z
|
# coding: utf-8
"""
CulebraTester
## Snaky Android Test --- If you want to be able to try out the API using the **Execute** or **TRY** button from this page - an android device should be connected using `adb` - the server should have been started using `./culebratester2 start-server` then you will be able to invoke the API and see the responses. # noqa: E501
OpenAPI spec version: 2.0.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
def __init__(cls, name, bases, dct):
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
cls._default = copy.copy(default)
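# Minimal sketch of the metaclass behaviour (illustrative, not executable at this
# point since Configuration is defined below): calling a TypeWithDefault class
# returns a *copy* of one shared default instance, so per-copy edits do not leak
# back unless set_default() is called explicitly.
#
#   c1 = Configuration()            # lazily creates the shared default, returns a copy
#   c1.host = "http://device:9987/v2"
#   c2 = Configuration()            # still has the original default host
#   Configuration.set_default(c1)   # future copies now start from c1's settings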
class Configuration(six.with_metaclass(TypeWithDefault, object)):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""Constructor"""
# Default Base url
self.host = "http://localhost:9987/v2"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# function to refresh API key if expired
self.refresh_api_key_hook = None
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("culebratester_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
        The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
        The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.0.20\n"\
"SDK Package Version: 2.0.20".\
format(env=sys.platform, pyversion=sys.version)
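# Usage sketch (illustrative; attribute names are the ones defined above):
#
#   from culebratester_client.configuration import Configuration
#   cfg = Configuration()
#   cfg.host = "http://localhost:9987/v2"   # default base URL shown above
#   cfg.debug = True                        # raises logger levels to DEBUG and enables httplib debug
#   cfg.logger_file = "culebra.log"         # swaps the stream handler for a file handler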
| 33.55102
| 333
| 0.620073
|
aa0eaa9b80d14e62a9fff3dcc195a66cb3d71da8
| 382
|
py
|
Python
|
empty_config.py
|
SamCHogg/Ark-Bot
|
3fe1ea32ebaf26563dca6827807ed534d60676a9
|
[
"MIT"
] | null | null | null |
empty_config.py
|
SamCHogg/Ark-Bot
|
3fe1ea32ebaf26563dca6827807ed534d60676a9
|
[
"MIT"
] | null | null | null |
empty_config.py
|
SamCHogg/Ark-Bot
|
3fe1ea32ebaf26563dca6827807ed534d60676a9
|
[
"MIT"
] | null | null | null |
# Discord App token
bbtoken = "" #REMOVE TOKEN BEFORE SYNC!!
logfile = 'discord.log' #Name of the file to log to (eg. 'discord.log')
pref = "!" #Command prefix
des = "ARK-Bot" #Description shown on using the help command
ark_install_dir = "" #Path to the ARK server install directory
# Error messages
err_mesg = ":x: **An error occurred!**"
err_mesg_permission = ":x: **You don't have the permission to use this command.**"
| 29.384615
| 82
| 0.698953
|
4dada5a5d45812cc371b2b6a457ae2c72c56b829
| 12,192
|
py
|
Python
|
Apps/rsp/axiom.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
Apps/rsp/axiom.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
Apps/rsp/axiom.py
|
zhanghongce/ila-mcm-fmcad18
|
e7045e38e45e758f2b0e0ecc7d4369f5014b8707
|
[
"MIT"
] | null | null | null |
import z3
def SameWg(i1,i2):
e1 = i1.Eid ; e2 = i2.Eid
assert e1.anyd == e2.anyd and e1.anyw == e2.anyw and e1.anyt == e2.anyt
retL = True
if not e1.anyd:
retL = retL and ( e1.d == e2.d )
if not e1.anyw:
retL = retL and ( e1.w == e2.w )
return retL
def SameDv(i1,i2):
e1 = i1.Eid ; e2 = i2.Eid
assert e1.anyd == e2.anyd and e1.anyw == e2.anyw and e1.anyt == e2.anyt
if not e1.anyd:
return e1.d == e2.d
return True
def HB(a,b):
return a.timestamp < b.timestamp
def HBd(a,b):
return z3.Implies( z3.And( a.inst.decodeFunc, b.inst.decodeFunc), a.timestamp < b.timestamp )
def either_or(a,b):
return ( a and not b ) or (b and not a)
def SameAddr(i1,i2):
assert either_or(i1.wvop is None , i1.rvop is None)
assert either_or(i2.wvop is None , i2.rvop is None)
addr1 = i1.wvop.addr if i1.wvop is not None else i1.rvop.addr
addr2 = i2.wvop.addr if i2.wvop is not None else i2.rvop.addr
return addr1 == addr2
def SameData(i1,i2):
assert either_or(i1.wvop is None , i1.rvop is None)
assert either_or(i2.wvop is None , i2.rvop is None)
val1 = i1.wvop.value if i1.wvop is not None else i1.rvop.value
val2 = i2.wvop.value if i2.wvop is not None else i2.rvop.value
return val1 == val2
def LAnd(l):
if len(l) == 0:
        print('<W> Axiom Scenario does not exist')
return z3.BoolVal(True)
elif len(l) == 1:
return l[0]
else:
return z3.And(l)
def LOr(l):
if len(l) == 0:
return z3.BoolVal(True)
elif len(l) == 1:
return l[0]
else:
return z3.Or(l)
def PO2Axioms(obj):
# ----- AXIOM load_na_or_WG_SC BEGIN -----
var4_L = []
for l1 in obj.load_na_or_WG_list: # forall l1 : obj.load_na_or_WG_list
var2_L = []
for l2 in obj.LOAD_list: # forall l2 : obj.LOAD_list
if l2 is l1: continue
var1 = z3.Implies( True , z3.Implies( ( z3.And( HB ( l1 , l2 ) , SameWg ( l1 , l2 ) ) ) , HB ( l1.rvop.WG , l2.rvop.WG ) ) )
var2_L.append( var1)
var3 = LAnd( var2_L)
var4_L.append( var3)
var5 = LAnd( var4_L)
obj.runProp.append( z3.simplify( var5 ) )
# ----- AXIOM load_na_or_WG_SC END -----
# ----- AXIOM load_DV_N_WG_SC BEGIN -----
var9_L = []
for l1 in obj.load_DV_N_list: # forall l1 : obj.load_DV_N_list
var7_L = []
for l2 in obj.LOAD_list: # forall l2 : obj.LOAD_list
if l2 is l1: continue
var6 = z3.Implies( True , z3.Implies( ( z3.And( HB ( l1 , l2 ) , SameWg ( l1 , l2 ) ) ) , HB ( l1.rvop.WG , l2.rvop.WG ) ) )
var7_L.append( var6)
var8 = LAnd( var7_L)
var9_L.append( var8)
var10 = LAnd( var9_L)
obj.runProp.append( z3.simplify( var10 ) )
# ----- AXIOM load_DV_N_WG_SC END -----
# ----- AXIOM load_DV_N_WG_REL BEGIN -----
var14_L = []
for l1 in obj.load_DV_N_list: # forall l1 : obj.load_DV_N_list
var12_L = []
for l2 in obj.LOAD_list: # forall l2 : obj.LOAD_list
if l2 is l1: continue
var11 = z3.Implies( True , z3.Implies( ( z3.And( HB ( l1.rvop.WG , l2.rvop.WG ) , SameWg ( l1 , l2 ) ) ) , HBd ( l1.rvop.DV , l2.rvop.DV ) ) )
var12_L.append( var11)
var13 = LAnd( var12_L)
var14_L.append( var13)
var15 = LAnd( var14_L)
obj.runProp.append( z3.simplify( var15 ) )
# ----- AXIOM load_DV_N_WG_REL END -----
# ----- AXIOM load_DV_R_WG_SC BEGIN -----
var19_L = []
for l1 in obj.load_DV_R_list: # forall l1 : obj.load_DV_R_list
var17_L = []
for l2 in obj.LOAD_list: # forall l2 : obj.LOAD_list
if l2 is l1: continue
var16 = z3.Implies( True , z3.Implies( ( z3.And( HB ( l1 , l2 ) , SameWg ( l1 , l2 ) ) ) , HB ( l1.rvop.WG , l2.rvop.WG ) ) )
var17_L.append( var16)
var18 = LAnd( var17_L)
var19_L.append( var18)
var20 = LAnd( var19_L)
obj.runProp.append( z3.simplify( var20 ) )
# ----- AXIOM load_DV_R_WG_SC END -----
# ----- AXIOM load_DV_R_WG_REL BEGIN -----
var24_L = []
for l1 in obj.load_DV_R_list: # forall l1 : obj.load_DV_R_list
var22_L = []
for l2 in obj.LOAD_list: # forall l2 : obj.LOAD_list
if l2 is l1: continue
var21 = z3.Implies( True , z3.Implies( ( z3.And( HB ( l1.rvop.WG , l2.rvop.WG ) , SameWg ( l1 , l2 ) ) ) , HBd ( l1.rvop.DV , l2.rvop.DV ) ) )
var22_L.append( var21)
var23 = LAnd( var22_L)
var24_L.append( var23)
var25 = LAnd( var24_L)
obj.runProp.append( z3.simplify( var25 ) )
# ----- AXIOM load_DV_R_WG_REL END -----
# ----- AXIOM store_na_or_WG_SC BEGIN -----
var29_L = []
for s1 in obj.store_na_or_WG_list: # forall s1 : obj.store_na_or_WG_list
var27_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var26 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2 , s1 ) , SameWg ( s2 , s1 ) ) ) , HB ( s2.wvop.WG , s1.wvop.WG ) ) )
var27_L.append( var26)
var28 = LAnd( var27_L)
var29_L.append( var28)
var30 = LAnd( var29_L)
obj.runProp.append( z3.simplify( var30 ) )
# ----- AXIOM store_na_or_WG_SC END -----
# ----- AXIOM store_DV_N_SC BEGIN -----
var34_L = []
for s1 in obj.store_DV_N_list: # forall s1 : obj.store_DV_N_list
var32_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var31 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2 , s1 ) , SameWg ( s2 , s1 ) ) ) , HB ( s2.wvop.WG , s1.wvop.WG ) ) )
var32_L.append( var31)
var33 = LAnd( var32_L)
var34_L.append( var33)
var35 = LAnd( var34_L)
obj.runProp.append( z3.simplify( var35 ) )
# ----- AXIOM store_DV_N_SC END -----
# ----- AXIOM store_DV_N_WG_DV BEGIN -----
var39_L = []
for s1 in obj.store_DV_N_list: # forall s1 : obj.store_DV_N_list
var37_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var36 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2.wvop.WG , s1.wvop.WG ) , SameWg ( s1 , s2 ) ) ) , HB ( s2.wvop.DV , s1.wvop.DV ) ) )
var37_L.append( var36)
var38 = LAnd( var37_L)
var39_L.append( var38)
var40 = LAnd( var39_L)
obj.runProp.append( z3.simplify( var40 ) )
# ----- AXIOM store_DV_N_WG_DV END -----
# ----- AXIOM store_DV_R_PO_WG BEGIN -----
var44_L = []
for s1 in obj.store_DV_R_list: # forall s1 : obj.store_DV_R_list
var42_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var41 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2 , s1 ) , SameWg ( s2 , s1 ) ) ) , HB ( s2.wvop.WG , s1.wvop.WG ) ) )
var42_L.append( var41)
var43 = LAnd( var42_L)
var44_L.append( var43)
var45 = LAnd( var44_L)
obj.runProp.append( z3.simplify( var45 ) )
# ----- AXIOM store_DV_R_PO_WG END -----
# ----- AXIOM store_DV_R_WG_DV BEGIN -----
var49_L = []
for s1 in obj.store_DV_R_list: # forall s1 : obj.store_DV_R_list
var47_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var46 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2.wvop.WG , s1.wvop.WG ) , SameWg ( s1 , s2 ) ) ) , HB ( s2.wvop.DV , s1.wvop.DV ) ) )
var47_L.append( var46)
var48 = LAnd( var47_L)
var49_L.append( var48)
var50 = LAnd( var49_L)
obj.runProp.append( z3.simplify( var50 ) )
# ----- AXIOM store_DV_R_WG_DV END -----
def store_dv_r_3(obj):
def RF(s,l): # read-from s --> load
if not SameDv(s,l): return True # this is no use: dummy True
var1_L = []
for sother in obj.STORE_list: # forall stores (~=s)
if sother is s: continue
var2 = z3.Or( CO(sother,s) , FR(l, sother) )
var1_L.append(var2)
var3 = z3.And( var1_L )
## SameWg(s,l) , z3.And( [var3, SameAddr(s,l), SameData(s,l) , HBd(s.wvop.WG, l.rvop.WG) ] )
if SameWg(s,l): # this is not made dynamic
return z3.And( [var3, SameAddr(s,l), SameData(s,l) , HBd(s.wvop.WG, l.rvop.WG) ] )
# else: SameDv(s,l):
return z3.And( [var3, SameAddr(s,l), SameData(s,l) , HB(s.wvop.DV, l.rvop.DV), s.wvop.DV.inst.decodeFunc, l.rvop.DV.inst.decodeFunc ] )
def FR(l,s):
if not SameDv(s,l): return True
if SameWg(s,l):
return z3.Implies( SameAddr(s,l) , HBd(l.rvop.WG, s.wvop.WG ) )
return z3.Implies( SameAddr(s,l) , HBd(l.rvop.DV, s.wvop.DV ) )
def CO(s1,s2):
if not SameDv(s1,s2): return True
if SameWg(s1,s2):
return z3.Implies( SameAddr(s1,s2) , HB(s1.wvop.WG, s2.wvop.WG ) )
return z3.Implies( SameAddr(s1,s2) , HB(s1.wvop.DV, s2.wvop.DV ) )
# ----- AXIOM store_DV_R_RSP BEGIN -----
var6_L = []
for s1 in obj.store_DV_R_list: # forall s1 : obj.store_DV_R_list
var4_L = []
for r in obj.LOAD_list: # forall r : obj.LOAD_list
var2_L = []
for r2 in obj.LOAD_list: # forall r2 : obj.LOAD_list
if r2 is r: continue
var1 = z3.Implies( True , z3.Implies( ( z3.And( HB ( r.rvop.WG , r2.rvop.WG ) , z3.BoolVal( SameWg ( r , r2 ) ) ) ) , HBd ( s1.wvop.DV , r2.rvop.DV ) ) )
var2_L.append( var1)
var3 = LAnd( var2_L)
var4_L.append( z3.Implies( z3.And( z3.BoolVal( SameDv ( s1,r) ), RF ( s1 , r ) ) , ( var3 ) ) )
var5 = LAnd( var4_L)
var6_L.append( var5)
var7 = LAnd( var6_L)
obj.runProp.append( z3.simplify( var7 ) ) #
# ----- AXIOM store_DV_R_RSP END -----
def load_dv_r_3(obj):
def RF(s,l): # read-from s --> load
if not SameDv(s,l): return True # this is no use: dummy True
var1_L = []
for sother in obj.STORE_list: # forall stores (~=s)
if sother is s: continue
var2 = z3.Or( CO(sother,s) , FR(l, sother) )
var1_L.append(var2)
var3 = z3.And( var1_L )
## SameWg(s,l) , z3.And( [var3, SameAddr(s,l), SameData(s,l) , HBd(s.wvop.WG, l.rvop.WG) ] )
if SameWg(s,l): # this is not made dynamic
return z3.And( [var3, SameAddr(s,l), SameData(s,l) , HBd(s.wvop.WG, l.rvop.WG) ] )
# else: SameDv(s,l): # we must make sure they read from the way we define
return z3.And( [var3, SameAddr(s,l), SameData(s,l) , HB(s.wvop.DV, l.rvop.DV), s.wvop.DV.inst.decodeFunc, l.rvop.DV.inst.decodeFunc ] )
def FR(l,s):
if not SameDv(s,l): return True
if SameWg(s,l):
return z3.Implies( SameAddr(s,l) , HBd(l.rvop.WG, s.wvop.WG ) )
return z3.Implies( SameAddr(s,l) , HBd(l.rvop.DV, s.wvop.DV ) )
def CO(s1,s2):
if not SameDv(s1,s2): return True
if SameWg(s1,s2):
return z3.Implies( SameAddr(s1,s2) , HB(s1.wvop.WG, s2.wvop.WG ) )
return z3.Implies( SameAddr(s1,s2) , HB(s1.wvop.DV, s2.wvop.DV ) )
# ----- AXIOM load_DV_R_RSP BEGIN -----
var6_L = []
for l1 in obj.load_DV_R_list: # forall l1 : obj.load_DV_R_list
var4_L = []
for s1 in obj.STORE_list: # forall s1 : obj.STORE_list
var2_L = []
for s2 in obj.STORE_list: # forall s2 : obj.STORE_list
if s2 is s1: continue
var1 = z3.Implies( True , z3.Implies( ( z3.And( HB ( s2.wvop.WG , s1.wvop.WG ) , z3.BoolVal( SameWg ( s1 , s2 ) ) ) ) , HBd ( s2.wvop.DV , l1.end ) ) )
var2_L.append( var1)
var3 = LAnd( var2_L)
var4_L.append( z3.Implies( ( z3.And( z3.BoolVal( SameDv ( s1 , l1 ) ) , RF ( s1 , l1 ) ) ) , ( var3 ) ) )
var5 = LAnd( var4_L)
var6_L.append( var5)
var7 = LAnd( var6_L)
obj.runProp.append( z3.simplify( var7 ) )
# ----- AXIOM load_DV_R_RSP END -----
| 42.480836
| 174
| 0.551263
|
4e8ddcfb3d47e1f172a8732522d663da3c262399
| 3,737
|
py
|
Python
|
app.py
|
dtolb/bandwidth-python-quickstart
|
405231879bf745b1eacfb3b9ca462036abb1fdf3
|
[
"MIT"
] | 1
|
2018-07-26T05:50:55.000Z
|
2018-07-26T05:50:55.000Z
|
app.py
|
dtolb/bandwidth-python-quickstart
|
405231879bf745b1eacfb3b9ca462036abb1fdf3
|
[
"MIT"
] | 2
|
2021-03-22T17:05:36.000Z
|
2021-06-01T21:41:33.000Z
|
app.py
|
dtolb/bandwidth-python-quickstart
|
405231879bf745b1eacfb3b9ca462036abb1fdf3
|
[
"MIT"
] | 1
|
2018-07-26T05:50:56.000Z
|
2018-07-26T05:50:56.000Z
|
import os
import sys
import pprint
import json
from bw_clients import *
from flask import Flask, request, render_template
from quick_deploy import *
app = Flask(__name__)
def po(o):
"""
Prints things to console much more nicely than the default print
"""
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(o)
# Global variable for the Bandwidth application id
bw_application_id = ''
# Just a nice hello world :)
@app.route('/')
def hello():
global bw_application_id
bw_application_id = check_or_create_application(request, bw_application_id)
my_numbers = check_or_create_phone_number(bw_application_id, '910')
numbers = ''
for number in my_numbers:
numbers = numbers + number['national_number'] + '\n'
return render_template('index.html', numbers_list=my_numbers)
@app.route(call_path, methods=['POST'])
def handle_voice():
"""
Setup a callback handler for POST voice events, with AUTO-ANSWER
keep in mind that if you have this setup in a BXML (for voice) app you will
need to rework the response. And change the method to GET
"""
callback_event = request.get_json()
po(callback_event)
# Get the event type
event_type = callback_event['eventType']
# get the call id
call_id = callback_event['callId']
# ignore incoming call events
if event_type == 'incomingCall':
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
# here we go, call is answered
elif event_type == 'answer':
# Play mp3
voice_api.play_audio_to_call(call_id, 'https://s3.amazonaws.com/bwdemos/hello.mp3')
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
# Finally if the playback is over hangup the call
elif event_type == 'playback' and callback_event['status'] == 'done':
voice_api.hangup_call(call_id)
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
elif event_type == 'hangup':
#send a message to them
messaging_api.send_message( from_ = callback_event['to'],
to = callback_event['from'],
text = ':) That was fun!',
media = ['https://s3.amazonaws.com/bwdemos/hello.jpg'])
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
# Ignore everything else
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route(message_path, methods=['POST'])
def handle_message():
"""
    Set up a callback handler for POST message events; keep in mind that if you
    have this set up as a BXML (for voice) app, this should be GET as well.
"""
callback_event = request.get_json()
po(callback_event)
event_type = callback_event['eventType']
if event_type == 'sms':
messaging_api.send_message( from_ = callback_event['to'],
to = callback_event['from'],
text = 'Great job texting! Keep it up')
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
elif event_type == 'mms':
messaging_api.send_message( from_ = callback_event['to'],
to = callback_event['from'],
text = 'Great job sending a mms. Here is a cute dog',
media = ['https://s3.amazonaws.com/bwdemos/cute_dog.jpg'])
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
# Ignore everything else
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
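# Illustrative inbound callback (fields limited to those the handlers read above;
# phone numbers are placeholders):
#
#   POST <message_path>
#   {"eventType": "sms", "from": "+19195551212", "to": "+19195550000"}
#
# handle_message() would answer the sender via messaging_api.send_message(), and
# an "mms" eventType additionally attaches the sample image URL.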
| 35.932692
| 92
| 0.627509
|
9e32778810f85adaa449543f6adf9510c517102e
| 684
|
py
|
Python
|
crowdsourcing/migrations/0018_auto_20150709_0208.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | 1
|
2016-02-29T01:26:42.000Z
|
2016-02-29T01:26:42.000Z
|
crowdsourcing/migrations/0018_auto_20150709_0208.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | 16
|
2015-08-10T18:28:18.000Z
|
2022-03-11T23:12:48.000Z
|
crowdsourcing/migrations/0018_auto_20150709_0208.py
|
Milstein/crowdsource-platform
|
60427e440373824c26c7daf8cf5f421b9c7ebbb5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0017_auto_20150709_0204'),
]
operations = [
migrations.RenameField(
model_name='conversationrecipient',
old_name='message',
new_name='conversation',
),
migrations.RemoveField(
model_name='conversationrecipient',
name='status',
),
migrations.AddField(
model_name='message',
name='status',
field=models.IntegerField(default=1),
),
]
| 23.586207
| 53
| 0.578947
|
a2f5b333cc331ab01ae86b1646be24525c55bbcf
| 367
|
py
|
Python
|
lib/JumpScale/core/system/string.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 1
|
2015-10-26T10:38:13.000Z
|
2015-10-26T10:38:13.000Z
|
lib/JumpScale/core/system/string.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 4
|
2016-08-25T12:08:39.000Z
|
2018-04-12T12:36:01.000Z
|
lib/JumpScale/core/system/string.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 3
|
2016-03-08T07:49:34.000Z
|
2018-10-19T13:56:43.000Z
|
import unicodedata
class String:
#exceptions = Exceptions
def decodeUnicode2Asci(self,text):
return unicodedata.normalize('NFKD', text.decode("utf-8")).encode('ascii','ignore')
def toolStripNonAsciFromText(self,text):
return "".join([char for char in str(text) if ((ord(char)>31 and ord(char)<127) or ord(char)==10)])
| 30.583333
| 107
| 0.648501
|
1f3caa1ed61a010b27bf3acced1c9e863bfb71c4
| 6,095
|
py
|
Python
|
var/spack/repos/builtin/packages/xrootd/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2021-09-29T02:14:40.000Z
|
2022-01-27T20:50:36.000Z
|
var/spack/repos/builtin/packages/xrootd/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2022-02-28T11:30:18.000Z
|
2022-03-23T19:34:56.000Z
|
var/spack/repos/builtin/packages/xrootd/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xrootd(CMakePackage):
"""The XROOTD project aims at giving high performance, scalable fault
tolerant access to data repositories of many kinds."""
homepage = "http://xrootd.org"
url = "http://xrootd.org/download/v5.3.1/xrootd-5.3.1.tar.gz"
list_url = 'https://xrootd.slac.stanford.edu/dload.html'
version('5.3.2', sha256='e8371fb9e86769bece74b9b9d67cb695023cd6a20a1199386fddd9ed840b0875')
version('5.3.1', sha256='7ea3a112ae9d8915eb3a06616141e5a0ee366ce9a5e4d92407b846b37704ee98')
version('5.1.0', sha256='c639536f1bdc5b6b365e807f3337ed2d41012cd3df608d40e91ed05f1c568b6d')
version('5.0.3', sha256='be40a1897d6c1f153d3e23c39fe96e45063bfafc3cc073db88a1a9531db79ac5')
version('5.0.1', sha256='ff4462b0b61db4cc01dda0e26abdd78e43649ee7ac5e90f7a05b74328ff5ac83')
version('4.12.6', sha256='1a9056ab7aeeaafa586ea77e442960c71d233c9ba60c7f9db9262c1410954ac4')
version('4.12.3', sha256='6f2ca1accc8d49d605706bb556777c753860bf46d845b1ee11393a5cb5987f15')
version('4.12.2', sha256='29f7bc3ea51b9d5d310eabd177152245d4160223325933c67f938ed5120f67bb')
version('4.12.1', sha256='7350d9196a26d17719b839fd242849e3995692fda25f242e67ac6ec907218d13')
version('4.12.0', sha256='69ef4732256d9a88127de4bfdf96bbf73348e0c70ce1d756264871a0ffadd2fc')
version('4.11.3', sha256='8e7a64fd55dfb452b6d5f76a9a97c493593943227b377623a3032da9197c7f65')
version('4.11.2', sha256='4620824db97fcc37dc3dd26110da8e5c3aab1d8302e4921d4f32e83207060603')
version('4.10.0', sha256='f07f85e27d72e9e8ff124173c7b53619aed8fcd36f9d6234c33f8f7fd511995b')
version('4.8.5', sha256='42e4d2cc6f8b442135f09bcc12c7be38b1a0c623a005cb5e69ff3d27997bdf73')
version('4.8.4', sha256='f148d55b16525567c0f893edf9bb2975f7c09f87f0599463e19e1b456a9d95ba')
version('4.8.3', sha256='9cd30a343758b8f50aea4916fa7bd37de3c37c5b670fe059ae77a8b2bbabf299')
version('4.8.2', sha256='8f28ec53e799d4aa55bd0cc4ab278d9762e0e57ac40a4b02af7fc53dcd1bef39')
version('4.8.1', sha256='edee2673d941daf7a6e5c963d339d4a69b4db5c4b6f77b4548b3129b42198029')
version('4.8.0', sha256='0b59ada295341902ca01e9d23e29780fb8df99a6d2bd1c2d654e9bb70c877ad8')
version('4.7.1', sha256='90ddc7042f05667045b06e02c8d9c2064c55d9a26c02c50886254b8df85fc577')
version('4.7.0', sha256='6cc69d9a3694e8dcf2392e9c3b518bd2497a89b3a9f25ffaec62efa52170349b')
version('4.6.1', sha256='0261ce760e8788f85d68918d7702ae30ec677a8f331dae14adc979b4cc7badf5')
version('4.6.0', sha256='b50f7c64ed2a4aead987de3fdf6fce7ee082407ba9297b6851cd917db72edd1d')
version('4.5.0', sha256='27a8e4ef1e6bb6bfe076fef50afe474870edd198699d43359ef01de2f446c670')
version('4.4.1', sha256='3c295dbf750de086c04befc0d3c7045fd3976611c2e75987c1477baca37eb549')
version('4.4.0', sha256='f066e7488390c0bc50938d23f6582fb154466204209ca92681f0aa06340e77c8')
version('4.3.0', sha256='d34865772d975b5d58ad80bb05312bf49aaf124d5431e54dc8618c05a0870e3c')
variant('http', default=True,
description='Build with HTTP support')
variant('python', default=False,
description='Build pyxroot Python extension')
variant('readline', default=True,
description='Use readline')
variant('krb5', default=False,
description='Build with KRB5 support')
variant('cxxstd',
default='11',
values=('98', '11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
conflicts('cxxstd=98', when='@4.7.0:')
depends_on('bzip2')
depends_on('cmake@2.6:', type='build')
depends_on('libxml2', when='+http')
depends_on('uuid', when="@4.11.0:")
depends_on('openssl@:1')
depends_on('python', when='+python')
depends_on('readline', when='+readline')
depends_on('xz')
depends_on('zlib')
depends_on('curl')
depends_on('krb5', when='+krb5')
depends_on('json-c')
extends('python', when='+python')
patch('python-support.patch', level=1, when='@:4.8+python')
# do not use systemd
patch('no-systemd.patch')
def patch(self):
"""Remove hardcoded -std=c++0x flag
"""
if self.spec.satisfies('@4.7.0:'):
filter_file(r'\-std=c\+\+0x', r'', 'cmake/XRootDOSDefs.cmake')
def cmake_args(self):
spec = self.spec
options = [
'-DENABLE_HTTP:BOOL={0}'.
format('ON' if '+http' in spec else 'OFF'),
'-DENABLE_PYTHON:BOOL={0}'.
format('ON' if '+python' in spec else 'OFF'),
'-DENABLE_READLINE:BOOL={0}'.
format('ON' if '+readline' in spec else 'OFF'),
'-DENABLE_KRB5:BOOL={0}'.
format('ON' if '+krb5' in spec else 'OFF'),
'-DENABLE_CEPH:BOOL=OFF'
]
# see https://github.com/spack/spack/pull/11581
if '+python' in self.spec:
options.append('-DPYTHON_EXECUTABLE=%s' %
spec['python'].command.path)
return options
def setup_build_environment(self, env):
cxxstdflag = ''
if self.spec.variants['cxxstd'].value == '98':
cxxstdflag = self.compiler.cxx98_flag
elif self.spec.variants['cxxstd'].value == '11':
cxxstdflag = self.compiler.cxx11_flag
elif self.spec.variants['cxxstd'].value == '14':
cxxstdflag = self.compiler.cxx14_flag
elif self.spec.variants['cxxstd'].value == '17':
cxxstdflag = self.compiler.cxx17_flag
else:
# The user has selected a (new?) legal value that we've
# forgotten to deal with here.
tty.die(
"INTERNAL ERROR: cannot accommodate unexpected variant ",
"cxxstd={0}".format(self.spec.variants['cxxstd'].value))
if cxxstdflag:
env.append_flags('CXXFLAGS', cxxstdflag)
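# Illustrative spec strings for this package (standard Spack variant syntax):
#
#   spack install xrootd                     # defaults: +http +readline cxxstd=11
#   spack install xrootd +python cxxstd=17   # add the Python bindings, build with C++17
#   spack install xrootd ~http +krb5         # drop HTTP support, enable Kerberos 5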
| 47.248062
| 96
| 0.690074
|
4df98a48c913340552c73e8f4310391d2608a348
| 25,235
|
py
|
Python
|
Python/klampt/vis/backends/vis_glut.py
|
mass2010chromium/Klampt
|
4a50ac10daf636e4f2d7acb635db2292fc2c72b6
|
[
"BSD-3-Clause"
] | null | null | null |
Python/klampt/vis/backends/vis_glut.py
|
mass2010chromium/Klampt
|
4a50ac10daf636e4f2d7acb635db2292fc2c72b6
|
[
"BSD-3-Clause"
] | null | null | null |
Python/klampt/vis/backends/vis_glut.py
|
mass2010chromium/Klampt
|
4a50ac10daf636e4f2d7acb635db2292fc2c72b6
|
[
"BSD-3-Clause"
] | null | null | null |
from ..visualization import _WindowManager,_ThreadedWindowManager,_globalLock,VisualizationScene
from .vis_gl import GLVisualizationFrontend,GLVisualizationPlugin,WindowInfo
from .. import glinit,gldraw,glcommon
from ...robotsim import WorldModel,RobotModel
import threading
if not glinit.available('GLUT'):
raise ImportError("Can't import vis_glut without first calling glinit.init()")
from OpenGL.GL import *
from OpenGL.GLUT import *
import time
import weakref
class GLUTWindowManager(_ThreadedWindowManager):
def __init__(self):
self._frontend = GLUTVisualizationFrontend(None)
#a list of WorldModel indices in the current window. A world cannot be used in multiple simultaneous
#windows in GLUT. If a world is reused with a different window, its display lists will be refreshed.
self.current_worlds = []
#list of WindowInfo's
self.windows = []
#the index of the current window
self.current_window = None
#the name of a window, if no windows exist yet
self.window_title = "Klamp't visualizer (%s)"%(sys.argv[0],)
#a callback sent to run
self.callback = None
#the current temp frontend if len(self.windows)=0, or windows[current_window].frontend
_ThreadedWindowManager.__init__(self)
def reset(self):
_ThreadedWindowManager.reset(self)
self.cleanup()
def run_app_thread(self,callback=None):
global _globalLock
assert not self.vis_thread_running,"Can't run a new GLUT thread, a thread is already running"
self.vis_thread_running = True
if len(self.windows)==0:
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
winfo = self.windows[self.current_window]
winfo.mode = 'shown'
winfo.worlds = self.current_worlds
winfo.active_worlds = self.current_worlds[:]
glinit._GLBackend.initialize("Klamp't visualization")
winfo = self.windows[self.current_window]
print("GLUTWindowManager.run_app_thread: creating window with name",winfo.name,"and status",winfo.mode)
w = glinit._GLBackend.createWindow(winfo.name)
self._frontend.windowinfo = weakref.proxy(winfo)
self._frontend.window_manager = weakref.proxy(self)
self._frontend.name = winfo.name
w.setProgram(self._frontend)
winfo.glwindow = w
self.callback = callback
print("Windows",[winfo.name for winfo in self.windows])
glinit._GLBackend.run()
print("GLUTWindowManager.run_app_thread: Visualization thread closing...")
self.cleanup()
self.vis_thread_running = False
print("GLUTWindowManager.run_app_thread: terminating.")
return
def cleanup(self):
print("GLUTWindowManager.cleanup()")
for w in self.windows:
w.frontend.scene.clear()
w.worlds = []
w.active_worlds = []
#for some reason, destroying windows causes everything to terminate early
if w.glwindow:
print("GLUTWindowManager: destroying window",w.glwindow.glutWindowID)
#glutDestroyWindow(w.glwindow.glutWindowID)
w.glwindow = None
self._frontend = GLUTVisualizationFrontend(None)
self.current_worlds = []
self.windows = []
self.current_window = None
self.window_title = "Klamp't visualizer (%s)"%(sys.argv[0],)
self.callback = None
def frontend(self):
return self._frontend
def scene(self):
return self._frontend.scene
def getWindowName(self):
return self.window_title
def setWindowName(self,title):
self.window_title = title
self.onFrontendChange()
def resizeWindow(self,w,h):
self._frontend.reshape(w,h)
def createWindow(self,title):
if len(self.windows) == 0:
#save the defaults in window 0
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.windows[-1].worlds = self.current_worlds
self.windows[-1].active_worlds = self.current_worlds[:]
if title is None:
title = "Window "+str(len(self.windows))
#make a new window
self._frontend = GLUTVisualizationFrontend(None)
self._frontend.window_manager = weakref.proxy(self)
self.windows.append(WindowInfo(title,self._frontend))
self.window_title = title
print("GLUTWindowManager.createWindow: window title",self.window_title,", id",len(self.windows)-1)
self.current_worlds = []
id = len(self.windows)-1
self.current_window = id
return id
def setWindow(self,id):
if id == self.current_window:
return
assert id >= 0 and id < len(self.windows),"Invalid window id"
self._frontend = self.windows[id].frontend
self.current_worlds = self.windows[id].worlds
self.window_title = self.windows[id].name
#print "vis.setWindow(",id,") the window has status",_windows[id].mode
#PyQt interface allows sharing display lists but GLUT does not.
#refresh all worlds' display lists that were once active.
for w in self.current_worlds:
if w in self.windows[self.current_window].active_worlds:
print("klampt.vis.setWindow(): world",w,"becoming active in the new window",id)
for item in self.windows[self.current_window].worldDisplayListItems[w]:
self._refreshDisplayLists(item)
self.windows[self.current_window].active_worlds.remove(w)
self.windows[id].active_worlds = self.current_worlds[:]
self.current_window = id
def getWindow(self):
return self.current_window
def setPlugin(self,plugin):
if not isinstance(self._frontend,GLUTVisualizationFrontend):
#was multi-view -- now setting plugin
self._frontend = GLUTVisualizationFrontend()
if self.current_window is not None:
if self.windows[self.current_window].glwindow is not None:
self._frontend.window = self.windows[self.current_window].glwindow
if plugin is None:
self._frontend.setPlugin(self._frontend.scene)
else:
self._frontend.setPlugin(plugin)
if hasattr(plugin,'world'):
self._checkWindowCurrent(plugin.world)
self.onFrontendChange()
def pushPlugin(self,plugin):
assert isinstance(self._frontend,glcommon.GLPluginProgram),"Can't push a plugin after splitView"
if len(self._frontend.plugins) == 0:
self._frontend.setPlugin(self._frontend.scene)
self._frontend.pushPlugin(plugin)
self.onFrontendChange()
def popPlugin(self):
self._frontend.popPlugin()
self.onFrontendChange()
def splitView(self,plugin):
#create a multi-view widget
if plugin is None:
plugin = GLVisualizationPlugin()
if isinstance(self._frontend,glcommon.GLMultiViewportProgram):
self._frontend.add_view(plugin)
if hasattr(plugin,'scene') and isinstance(plugin.scene,VisualizationScene):
self._frontend.scene = plugin.scene
else:
if len(self._frontend.plugins) == 0:
self.setPlugin(None)
multiProgram = GLUTMultiWindowVisualizationFrontend(None)
multiProgram.windowinfo = weakref.proxy(self.windows[self.current_window])
multiProgram.window = None
if self.current_window is not None:
if self.windows[self.current_window].glwindow is not None:
multiProgram.window = self.windows[self.current_window].glwindow
multiProgram.add_view(self._frontend)
multiProgram.add_view(plugin)
multiProgram.name = self.window_title
self._frontend = multiProgram
multiProgram.scene = self._frontend
if hasattr(plugin,'scene') and isinstance(plugin.scene,VisualizationScene):
multiProgram.scene = plugin.scene
if isinstance(plugin,GLVisualizationPlugin):
plugin.program = weakref.proxy(self._frontend.views[-1])
self.onFrontendChange()
def unlock(self):
_ThreadedWindowManager.unlock(self)
self.update()
def update(self):
for w in self.windows:
if w.glwindow:
w.doRefresh = True
def show(self):
if len(self.windows)==0:
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
print("First show(), window title",self.window_title)
winfo = self.windows[self.current_window]
winfo.mode = 'shown'
winfo.worlds = self.current_worlds
winfo.active_worlds = self.current_worlds[:]
if not self.vis_thread_running:
print("GLUTWindowManager.show(): first window shown, starting the visualization thread")
self._start_app_thread()
def shown(self):
return (self.vis_thread_running and self.current_window is not None and self.windows[self.current_window].mode in ['shown','dialog'])
def hide(self):
if self.current_window is None:
return
self.windows[self.current_window].mode = 'hidden'
def dialog(self):
global _globalLock
if len(self.windows)==0:
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
w = self.windows[self.current_window]
w.mode = 'dialog'
w.worlds = self.current_worlds
w.active_worlds = self.current_worlds[:]
if self.multithreaded():
print("#########################################")
print("klampt.vis: Running multi-threaded dialog, waiting to complete...")
if not self.vis_thread_running:
self._start_app_thread()
while w.mode == 'dialog':
time.sleep(0.1)
print("klampt.vis: ... dialog done.")
print("#########################################")
return None
else:
print("#########################################")
print("klampt.vis: Running single-threaded dialog")
self.in_vis_loop = True
res = self.run_app_thread()
            self.in_vis_loop = False
print("klampt.vis: ... dialog done.")
print("#########################################")
return res
def set_custom_ui(self,func):
if len(self.windows)==0:
print("Making first window for custom ui")
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
self.windows[self.current_window].custom_ui = func
print("klampt.vis: setting custom ui on window",self.current_window)
return
def onFrontendChange(self):
if self.current_window is None:
return
w = self.windows[self.current_window]
w.doReload = True
w.frontend = self._frontend
if w.glwindow:
w.glwindow.reshape(self._frontend.view.w,self._frontend.view.h)
if w.name != self.window_title:
glutSetWindow(w.glwindow.glutWindowID)
glutSetWindowTitle(self.window_title)
w.name = self.window_title
def _refreshDisplayLists(self,item):
if isinstance(item,WorldModel):
for i in range(item.numRobots()):
self._refreshDisplayLists(item.robot(i))
for i in range(item.numRigidObjects()):
self._refreshDisplayLists(item.rigidObject(i))
for i in range(item.numTerrains()):
self._refreshDisplayLists(item.terrain(i))
elif isinstance(item,RobotModel):
for i in range(item.numLinks()):
self._refreshDisplayLists(item.link(i))
elif hasattr(item,'appearance'):
item.appearance().refresh(False)
def _checkWindowCurrent(self,item):
#print("Checking whether item",item,"is current in the context of window",self.current_window)
#print("Current worlds",self.current_worlds)
#print("Current window's active worlds",self.windows[self.current_window].active_worlds)
if isinstance(item,WorldModel):
if item.index not in self.current_worlds:
#PyQt interface allows sharing display lists but GLUT does not.
#refresh all worlds' display lists that will be shifted to the current window.
for i,win in enumerate(self.windows):
#print("Window",i,"active worlds",win.active_worlds)
if item.index in win.active_worlds:
#GLUT SPECIFIC
print("klampt.vis: world",item.index,"was shown in a different window, now refreshing display lists")
self._refreshDisplayLists(item)
win.active_worlds.remove(item.index)
self.current_worlds.append(item.index)
if self.current_window is not None:
self.windows[self.current_window].worldDisplayListItems[item.index].append(weakref.proxy(item))
#print("klampt.vis: world added to the visualization's world (items:",self.current_worlds,")")
#else:
# print("klampt.vis: world",item,"is already in the current window's world")
elif hasattr(item,'world'):
if isinstance(item.world,WorldModel):
return self._checkWindowCurrent(item.world)
if isinstance(item.world,int):
if item.world < 0:
return
if item.world not in self.current_worlds:
for i,win in enumerate(self.windows):
#print("Window",i,"active worlds",win.active_worlds)
if item.world in win.active_worlds:
#GLUT SPECIFIC
                        print("klampt.vis: world",item.world,"was shown in a different window, now refreshing display lists")
self._refreshDisplayLists(item)
win.active_worlds.remove(item.world)
self.current_worlds.append(item.world)
if self.current_window is not None:
                    self.windows[self.current_window].worldDisplayListItems[item.world].append(weakref.proxy(item))
#print("klampt.vis: world added to the visualization's world (items:",self.current_worlds,")")
def do_idle_checks(self):
#print("GLUTWindowManager.idle checks")
if self.quit:
if bool(glutLeaveMainLoop):
glutLeaveMainLoop()
else:
for w in self.windows:
w.close()
w.glwindow = None
return
for windex,winfo in enumerate(self.windows):
#print(winfo.name,winfo.glwindow,winfo.mode)
if winfo.glwindow is None and winfo.mode in ['shown','dialog']:
print("GLUTWindowManager: Launching window %d inside vis thread"%(windex,))
w = glinit._GLBackend.createWindow(winfo.name)
self._frontend.windowinfo = weakref.proxy(winfo)
self._frontend.window_manager = weakref.proxy(self)
self._frontend.name = winfo.name
w.setProgram(self._frontend)
winfo.glwindow = w
w.initialize()
if not winfo.frontend.hidden:
if winfo.mode == 'hidden':
print("GLUTWindowManager: hiding window %d (%s)"%(windex,winfo.name))
winfo.frontend.hidden = True
if winfo.glwindow is not None:
glutSetWindow(winfo.glwindow.glutWindowID)
glutHideWindow()
else:
#print("hidden, waiting...",self.windowinfo.mode)
if winfo.mode == 'shown':
print("GLUTWindowManager: showing window %d (%s)"%(windex,winfo.name))
print("GLUT ID",winfo.glwindow.glutWindowID)
glutSetWindow(winfo.glwindow.glutWindowID)
glutShowWindow()
winfo.frontend.hidden = False
elif winfo.mode == 'dialog':
print("GLUTWindowManager: showing window %d (%s) in dialog mode"%(windex,winfo.name))
print("GLUT ID",winfo.glwindow.glutWindowID)
winfo.frontend.inDialog = True
glutSetWindow(winfo.glwindow.glutWindowID)
glutShowWindow()
winfo.frontend.hidden = False
if self.in_vis_loop and (len(self.windows)==0 or all(w.mode == 'hidden' for w in self.windows)):
print("klampt.vis: No windows shown, breaking out of vis loop")
if bool(glutLeaveMainLoop):
glutLeaveMainLoop()
else:
while glutGetWindow():
for w in self.windows:
w.close()
for w in self.windows:
w.glwindow = None
return
self.in_app_thread = True
calls = self.threadcalls
self.threadcalls = []
for c in calls:
c()
if self.callback:
self.callback()
self.in_app_thread = False
return
def screenshot(self,format,want_depth):
if threading.current_thread().__class__.__name__ != '_MainThread':
#already in visualization loop -- just get the image
return self._frontend.get_screen(format,want_depth)
return_values = []
def storeScreenshot(img,depth=None,return_values=return_values):
return_values.append((img,depth))
self.screenshotCallback(storeScreenshot,format,want_depth)
#wait for the vis thread to call the function
while len(return_values)==0:
time.sleep(0.01)
res = return_values[0]
if not want_depth:
return res[0]
else:
return res
def screenshotCallback(self,fn,format,want_depth):
if threading.current_thread().__class__.__name__ != '_MainThread':
#already in visualization loop -- just get the image
res = self._frontend.get_screen(format,want_depth)
if want_depth:
fn(*res)
else:
                fn(res)
            return
def do_screenshot_callback(fn=fn,format=format,want_depth=want_depth):
res = self._frontend.get_screen(format,want_depth)
if want_depth:
fn(*res)
else:
fn(res)
self.threadCall(do_screenshot_callback)
class GLUTVisualizationFrontend(GLVisualizationFrontend):
def __init__(self,windowinfo):
GLVisualizationFrontend.__init__(self)
self.scene = GLUTVisualizationPlugin()
self.setPlugin(self.scene)
self.scene.program = weakref.proxy(self)
self.windowinfo = windowinfo
self.window_manager = None
self.inDialog = False
self.hidden = False
self.inSubwindow = False
def display(self):
global _globalLock
_globalLock.acquire()
GLVisualizationFrontend.display(self)
_globalLock.release()
return True
def display_screen(self):
global _globalLock
_globalLock.acquire()
GLVisualizationFrontend.display_screen(self)
_globalLock.release()
if self.inSubwindow:
return
glDisable(GL_LIGHTING)
glColor3f(1,1,1)
y = 30
glRasterPos(20,y)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"(Do not close this window except to quit)")
y += 25
if self.inDialog:
glColor3f(1,1,0)
glRasterPos(20,y)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Dialog mode. Press 'Esc' to return to normal mode")
y += 25
else:
glColor3f(1,1,0)
glRasterPos(20,y)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Window mode. Press 'Esc' to hide window")
y += 25
for a in self.actions:
if a.key is not None:
glColor3f(0,0,0)
glRasterPos(20,y)
desc = a.short_text
if a.description is not None and a.description != a.short_text:
desc = desc + ". "+a.description
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_12,a.key+": "+desc)
y += 14
def keyboardfunc(self,c,x,y):
if not self.inSubwindow:
if len(c)==1 and ord(c)==27:
if self.inDialog:
print("Esc pressed, hiding dialog")
self.inDialog = False
else:
print("Esc pressed, hiding window")
global _globalLock
_globalLock.acquire()
self.windowinfo.mode = 'hidden'
_globalLock.release()
return True
if isinstance(c,bytes):
c = c.decode('utf-8')
for a in self.actions:
if a.key is None:
continue
if a.key.startswith('Ctrl'):
if 'ctrl' in self.modifiers():
if a.key[5:] == c:
a.hook()
elif a.key.startswith('Shift'):
if 'shift' in self.modifiers():
if a.key[6:] == c:
a.hook()
elif a.key.startswith('Alt'):
if 'alt' in self.modifiers():
if a.key[4:] == c:
a.hook()
elif a.key == c:
a.hook()
else:
return GLVisualizationFrontend.keyboardfunc(self,c,x,y)
def idlefunc(self):
global _globalLock
_globalLock.acquire()
self.window_manager.do_idle_checks()
_globalLock.release()
return GLVisualizationFrontend.idlefunc(self)
class GLUTMultiWindowVisualizationFrontend(glcommon.GLMultiViewportProgram):
def __init__(self,windowinfo):
glcommon.GLMultiViewportProgram.__init__(self)
self.windowinfo = windowinfo
self.window_manager = None
self.inDialog = False
self.hidden = False
self.inSubwindow = False
def addView(self,view):
warnings.warn("addView will be deprecated in favor of add_view in a future version of Klampt",DeprecationWarning)
self.add_view(view)
def add_view(self,view):
if isinstance(view,(GLUTVisualizationFrontend,GLUTMultiWindowVisualizationFrontend)):
view.inSubwindow = True
glcommon.GLMultiViewportProgram.add_view(self,view)
def display_screen(self):
glcommon.GLMultiViewportProgram.display_screen(self)
if not self.inSubwindow:
glDisable(GL_LIGHTING)
glColor3f(1,1,1)
glRasterPos(20,50)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"(Do not close this window except to quit)")
if self.inDialog:
glColor3f(1,1,0)
glRasterPos(20,80)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Dialog mode. Press 'Esc' to return to normal mode")
else:
glColor3f(1,1,0)
glRasterPos(20,80)
gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Window mode. Press 'Esc' to hide window")
def keyboardfunc(self,c,x,y):
if len(c)==1 and ord(c)==27:
if self.inDialog:
print("Esc pressed, hiding dialog")
self.inDialog = False
else:
print("Esc pressed, hiding window")
global _globalLock
_globalLock.acquire()
self.windowinfo.mode = 'hidden'
_globalLock.release()
return True
else:
return glcommon.GLMultiViewportProgram.keyboardfunc(self,c,x,y)
def idlefunc(self):
global _globalLock
_globalLock.acquire()
self.window_manager.do_idle_checks()
_globalLock.release()
return glcommon.GLMultiViewportProgram.idlefunc(self)
class GLUTVisualizationPlugin(GLVisualizationPlugin):
def __init__(self):
GLVisualizationPlugin.__init__(self)
def add(self,name,item,keepAppearance=False,**kwargs):
GLVisualizationPlugin.add(self,name,item,keepAppearance,**kwargs)
#need to check whether the item is part of the current GLUT world
if self.program and self.program.window_manager:
self.program.window_manager._checkWindowCurrent(item)
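# --- Illustrative usage sketch (not part of the original module) ---
# The classes above form the GLUT backend of a plugin-based visualizer: a
# window manager owns WindowInfo records, each wrapping a
# GLUTVisualizationFrontend that renders a GLUTVisualizationPlugin scene.
# The manager class name (GLUTWindowManager) is inferred from the log
# messages above, and the public package API normally wraps these calls,
# so treat this as a sketch only:
#
#   manager = GLUTWindowManager()
#   manager.setWindowName("My viewer")
#   manager.setPlugin(GLUTVisualizationPlugin())   # install a scene plugin
#   manager.show()                                 # starts the GLUT vis thread
#   while manager.shown():
#       time.sleep(0.1)                            # main thread idles
#   manager.cleanup()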
| 42.989779
| 141
| 0.596751
|
a1cd0030e725be749fe4f824562d7161c89c51d7
| 5,456
|
py
|
Python
|
skills/grounding_skill/utils.py
|
stefanrer/commonbsecret
|
bb527f9b3e460124ccc307c0d39baba9a2490fcd
|
[
"Apache-2.0"
] | null | null | null |
skills/grounding_skill/utils.py
|
stefanrer/commonbsecret
|
bb527f9b3e460124ccc307c0d39baba9a2490fcd
|
[
"Apache-2.0"
] | null | null | null |
skills/grounding_skill/utils.py
|
stefanrer/commonbsecret
|
bb527f9b3e460124ccc307c0d39baba9a2490fcd
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
import re
from common.grounding import COMPLIMENT_PROPERTIES
from common.utils import MIDAS_SEMANTIC_LABELS
INTENT_DICT = {
    "Information_DeliveryIntent": "You just told me about ENTITY_NAME, right?",
    "Information_RequestIntent": "You've asked me about ENTITY_NAME, haven't you?",
    "User_InstructionIntent": "You just gave me a command. Am I right?",
    "Opinion_ExpressionIntent": "You just shared your opinion about ENTITY_NAME with me, right?",
    "ClarificationIntent": "You clarified for me what you've just said about ENTITY_NAME, right?",
    "Topic_SwitchIntent": "You wanted to change topic, right?",
    "Opinion_RequestIntent": "You wanted to hear my thoughts about ENTITY_NAME, am I correct?",
}
DA_TOPIC_DICT = {
    "Entertainment_Movies": "We were discussing movies, am I right?",
    "Entertainment_Books": "We were discussing books, am I right?",
    "Entertainment_General": "We are just trying to be polite to each other, aren't we?",
    "Science_and_Technology": "I was under the impression we were chatting about technology stuff.",
    "Sports": "So I thought we were talking about sports.",
    "Politics": "Correct me if I'm wrong but I thought we were discussing politics.",
}
COBOT_TOPIC_DICT = {
    "Phatic": "We are just trying to be polite to each other, aren't we?",
    "Other": "I can't figure out what we are talking about exactly. Can you spare a hand?",
    "Movies_TV": "We were discussing movies, am I right?",
    "Music": "Thought we were talking about music.",
    "SciTech": "I was under the impression we were chatting about technology stuff.",
    "Literature": "We were discussing literature, am I right?",
    "Travel_Geo": "Thought we were talking about some travel stuff.",
    "Celebrities": "We're discussing celebrities, right?",
    "Games": "We're talking about games, correct?",
    "Pets_Animals": "Thought we were talking about animals.",
    "Sports": "So I thought we were talking about sports.",
    "Psychology": "Correct me if I'm wrong but I thought we were talking about psychology.",
    "Religion": "Aren't we talking about religion, my dear?",
    "Weather_Time": "Aren't we discussing the best topic of all time, weather?",
    "Food_Drink": "Thought we were discussing food stuff.",
    "Politics": "Correct me if I'm wrong but I thought we were discussing politics.",
    "Sex_Profanity": "This is something I'd rather avoid talking about.",
    "Art_Event": "My understanding is we are discussing arts, aren't we?",
    "Math": "My guess is we were talking about math stuff.",
    "News": "Aren't we discussing news, my dear friend?",
    "Entertainment": "Thought we were discussing something about entertainment.",
    "Fashion": "We are talking about fashion, am I right?",
}
def get_entity_name(annotations):
entity_list = []
for tmp in annotations.get("ner", []):
if len(tmp) > 0 and "text" in tmp[0]:
entity_list.append(tmp[0]["text"])
if len(entity_list) == 1:
entity_name = entity_list[0]
elif len(entity_list) > 1:
entity_name = ",".join(entity_list[:-1]) + " and " + entity_list[-1]
else:
entity_name = ""
entity_name = entity_name.replace("?", "")
return entity_name
with open("midas_acknowledgements.json", "r") as f:
MIDAS_INTENT_ACKNOWLEDGMENTS = json.load(f)
MIDAS_INTENT_ANALOGUES = {
"open_question_opinion": ["open_question_opinion", "Opinion_RequestIntent"],
"open_question_factual": ["open_question_factual", "Information_RequestIntent"],
"open_question_personal": ["open_question_personal"],
"yes_no_question": ["yes_no_question"],
}
def get_midas_analogue_intent_for_any_intent(intent):
for midas_intent_name in MIDAS_INTENT_ANALOGUES:
if intent in MIDAS_INTENT_ANALOGUES[midas_intent_name]:
return midas_intent_name
if intent in MIDAS_SEMANTIC_LABELS:
return intent
return None
def get_midas_intent_acknowledgement(intent, entity_name):
pos_responses = MIDAS_INTENT_ACKNOWLEDGMENTS.get(intent, [])
if pos_responses:
response = random.choice(pos_responses).replace("SUBJECT", entity_name)
response = response.replace("PROPERTY", random.choice(COMPLIMENT_PROPERTIES).lower())
else:
response = ""
response = response.replace("?", "")
return response
def reformulate_question_to_statement(question):
statement = question.lower()
# do you have any dogs -> whether I have any dogs
statement = re.sub(r"^do you", "whether I", statement)
# why do you have three dogs -> why I have three dogs
statement = re.sub(r"\bdo you\b", "I", statement)
# are you kidding -> whether I'm kidding
statement = re.sub(r"^are you", "whether I'M", statement)
# why are you laughing -> why I'm laughing
statement = re.sub(r"\bare you\b", "I'M", statement)
# what are yours favorite colors -> what are my favorite colors
statement = re.sub(r"\byours\b", "MY", statement)
# what is your favorite color -> what is my favorite color
statement = re.sub(r"\byour\b", "MY", statement)
# can you tell me jokes -> can I tell me jokes
statement = re.sub(r"\byou\b", "I", statement)
# can you tell me jokes -> can I tell you jokes
statement = re.sub(r"\bme\b", "YOU", statement)
# can you say my name -> can I say your name
statement = re.sub(r"\bmy\b", "YOUR", statement)
return statement.lower()
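# Illustrative check of the rewriting rules above.  Running it requires the
# module-level dependencies (common.*, midas_acknowledgements.json) to be
# available, so treat it as a sketch rather than a test suite.
if __name__ == "__main__":
    print(reformulate_question_to_statement("Do you have any dogs"))
    # -> "whether i have any dogs"
    print(reformulate_question_to_statement("Can you tell me jokes"))
    # -> "can i tell you jokes"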
| 43.648
| 97
| 0.692082
|
804bbc848254e16c75626fd1b721170f29b2749c
| 2,538
|
py
|
Python
|
aws_ping_login.py
|
surfkansas/aws-ping-login
|
0bc95fff98fba04abcdb3256d56d74dffdb0e36a
|
[
"MIT"
] | 1
|
2018-06-26T21:06:16.000Z
|
2018-06-26T21:06:16.000Z
|
aws_ping_login.py
|
surfkansas/aws-ping-login
|
0bc95fff98fba04abcdb3256d56d74dffdb0e36a
|
[
"MIT"
] | 5
|
2018-06-15T22:27:24.000Z
|
2020-05-01T20:42:14.000Z
|
aws_ping_login.py
|
surfkansas/aws-ping-login
|
0bc95fff98fba04abcdb3256d56d74dffdb0e36a
|
[
"MIT"
] | 1
|
2018-06-15T22:29:35.000Z
|
2018-06-15T22:29:35.000Z
|
import argparse
import os
import wx
import wx.html2
import boto3
from bs4 import BeautifulSoup
class MyBrowser(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
sizer = wx.BoxSizer(wx.VERTICAL)
self.browser = wx.html2.WebView.New(self)
sizer.Add(self.browser, 1, wx.EXPAND, 10)
self.SetSizer(sizer)
self.SetSize((700, 700))
self.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.OnNavigate, self.browser)
def OnNavigate(self,evt):
if self.browser.GetCurrentURL() == 'https://signin.aws.amazon.com/saml':
page_source = self.browser.GetPageSource()
self.Close()
soup = BeautifulSoup(page_source, 'html.parser')
base64saml = soup.find('input', {'name': 'SAMLResponse'})['value']
self.saml = base64saml
def execute_login_dialog(args):
app = wx.App()
dialog = MyBrowser(None, -1)
dialog.browser.LoadURL(args.sso_url)
dialog.Show()
app.MainLoop()
return dialog.saml
def process_login(args):
saml = execute_login_dialog(args)
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(args.account, args.role)
principal_arn = 'arn:aws:iam::{0}:saml-provider/{1}'.format(args.account, args.saml_provider)
session_duration = int(args.session_duration) * 60
sts = boto3.client('sts')
sts_session = sts.assume_role_with_saml(
RoleArn=role_arn,
PrincipalArn=principal_arn,
SAMLAssertion=saml,
DurationSeconds=session_duration
)
os.system('aws configure --profile {0} set aws_access_key_id {1}'.format(args.profile, sts_session['Credentials']['AccessKeyId']))
os.system('aws configure --profile {0} set aws_secret_access_key {1}'.format(args.profile, sts_session['Credentials']['SecretAccessKey']))
os.system('aws configure --profile {0} set aws_session_token {1}'.format(args.profile, sts_session['Credentials']['SessionToken']))
if args.region is not None:
os.system('aws configure --profile {0} set region {1}'.format(args.profile, args.region))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--sso-url')
parser.add_argument('--saml-provider')
parser.add_argument('--account')
parser.add_argument('--role')
parser.add_argument('--profile')
parser.add_argument('--region')
parser.add_argument('--session-duration')
args = parser.parse_args()
process_login(args)
if __name__ == "__main__":
main()
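# Example invocation (all values below are placeholders, not real endpoints):
#
#   python aws_ping_login.py \
#       --sso-url "https://sso.example.com/idp/startSSO.ping?PartnerSpId=urn:amazon:webservices" \
#       --saml-provider ExampleProvider \
#       --account 123456789012 \
#       --role ExampleRole \
#       --profile example-profile \
#       --region us-east-1 \
#       --session-duration 60
#
# The SAML assertion captured from the embedded browser window is exchanged
# through STS, and the temporary credentials are written to the named AWS CLI
# profile via `aws configure`.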
| 33.84
| 142
| 0.664303
|
ea923e73fe59de4025453f6868a8d300d99a0ad6
| 18,481
|
py
|
Python
|
keystone/common/kvs/core.py
|
trananhkma/keystone
|
1d34614121cbe694bfd107f1ce7a9c402d6a30b4
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/kvs/core.py
|
trananhkma/keystone
|
1d34614121cbe694bfd107f1ce7a9c402d6a30b4
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/kvs/core.py
|
trananhkma/keystone
|
1d34614121cbe694bfd107f1ce7a9c402d6a30b4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import threading
import time
import weakref
from dogpile.cache import api
from dogpile.cache import proxy
from dogpile.cache import region
from dogpile.cache import util as dogpile_util
from dogpile.core import nameregistry
from oslo_log import log
from oslo_log import versionutils
from oslo_utils import importutils
from oslo_utils import reflection
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
__all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
'get_key_value_store')
BACKENDS_REGISTERED = False
CONF = keystone.conf.CONF
KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary()
LOCK_WINDOW = 1
LOG = log.getLogger(__name__)
NO_VALUE = api.NO_VALUE
def _register_backends():
# NOTE(morganfainberg): This function exists to ensure we do not try and
# register the backends prior to the configuration object being fully
# available. We also need to ensure we do not register a given backend
# more than one time. All backends will be prefixed with openstack.kvs
# as the "short" name to reference them for configuration purposes. This
# function is used in addition to the pre-registered backends in the
# __init__ file for the KVS system.
global BACKENDS_REGISTERED
if not BACKENDS_REGISTERED:
prefix = 'openstack.kvs.%s'
for backend in CONF.kvs.backends:
module, cls = backend.rsplit('.', 1)
backend_name = prefix % cls
LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
'%(backend_name)s'),
{'backend_path': backend, 'backend_name': backend_name})
region.register_backend(backend_name, module, cls)
BACKENDS_REGISTERED = True
def sha1_mangle_key(key):
"""Wrapper for dogpile's sha1_mangle_key.
Taken from oslo_cache.core._sha1_mangle_key
dogpile's sha1_mangle_key function expects an encoded string, so we
should take steps to properly handle multiple inputs before passing
the key through.
"""
try:
key = key.encode('utf-8', errors='xmlcharrefreplace')
except (UnicodeError, AttributeError): # nosec
# NOTE(stevemar): if encoding fails just continue anyway.
pass
return dogpile_util.sha1_mangle_key(key)
class LockTimeout(exception.UnexpectedError):
debug_message_format = _('Lock Timeout occurred for key, %(target)s')
class KeyValueStore(object):
"""Basic KVS manager object to support Keystone Key-Value-Store systems.
This manager also supports the concept of locking a given key resource to
allow for a guaranteed atomic transaction to the backend.
Deprecated as of Newton.
"""
@versionutils.deprecated(
versionutils.deprecated.NEWTON,
what='keystone key-value-store common code',
remove_in=+2)
def __init__(self, kvs_region):
self.locking = True
self._lock_timeout = 0
self._region = kvs_region
self._security_strategy = None
self._secret_key = None
self._lock_registry = nameregistry.NameRegistry(self._create_mutex)
def configure(self, backing_store, key_mangler=None, proxy_list=None,
locking=True, **region_config_args):
"""Configure the KeyValueStore instance.
:param backing_store: dogpile.cache short name of the region backend
:param key_mangler: key_mangler function
:param proxy_list: list of proxy classes to apply to the region
:param locking: boolean that allows disabling of locking mechanism for
this instantiation
:param region_config_args: key-word args passed to the dogpile.cache
backend for configuration
"""
if self.is_configured:
# NOTE(morganfainberg): It is a bad idea to reconfigure a backend,
# there are a lot of pitfalls and potential memory leaks that could
# occur. By far the best approach is to re-create the KVS object
# with the new configuration.
raise RuntimeError(_('KVS region %s is already configured. '
'Cannot reconfigure.') % self._region.name)
self.locking = locking
self._lock_timeout = region_config_args.pop(
'lock_timeout', CONF.kvs.default_lock_timeout)
self._configure_region(backing_store, **region_config_args)
self._set_key_mangler(key_mangler)
self._apply_region_proxy(proxy_list)
@property
def is_configured(self):
return 'backend' in self._region.__dict__
def _apply_region_proxy(self, proxy_list):
if isinstance(proxy_list, list):
proxies = []
for item in proxy_list:
if isinstance(item, str):
LOG.debug('Importing class %s as KVS proxy.', item)
pxy = importutils.import_class(item)
else:
pxy = item
if issubclass(pxy, proxy.ProxyBackend):
proxies.append(pxy)
else:
pxy_cls_name = reflection.get_class_name(
pxy, fully_qualified=False)
LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
pxy_cls_name)
for proxy_cls in reversed(proxies):
proxy_cls_name = reflection.get_class_name(
proxy_cls, fully_qualified=False)
LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
{'proxy': proxy_cls_name,
'name': self._region.name})
self._region.wrap(proxy_cls)
def _assert_configured(self):
        if 'backend' not in self._region.__dict__:
            raise exception.UnexpectedError(_('Key Value Store not '
                                              'configured: %s') %
                                            self._region.name)
def _set_keymangler_on_backend(self, key_mangler):
try:
self._region.backend.key_mangler = key_mangler
except Exception as e:
# NOTE(morganfainberg): The setting of the key_mangler on the
# backend is used to allow the backend to
# calculate a hashed key value as needed. Not all backends
# require the ability to calculate hashed keys. If the
# backend does not support/require this feature log a
# debug line and move on otherwise raise the proper exception.
# Support of the feature is implied by the existence of the
# 'raw_no_expiry_keys' attribute.
if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
LOG.debug(('Non-expiring keys not supported/required by '
'%(region)s backend; unable to set '
'key_mangler for backend: %(err)s'),
{'region': self._region.name, 'err': e})
else:
raise
def _set_key_mangler(self, key_mangler):
# Set the key_mangler that is appropriate for the given region being
# configured here. The key_mangler function is called prior to storing
# the value(s) in the backend. This is to help prevent collisions and
# limit issues such as memcache's limited cache_key size.
use_backend_key_mangler = getattr(self._region.backend,
'use_backend_key_mangler', False)
if ((key_mangler is None or use_backend_key_mangler) and
(self._region.backend.key_mangler is not None)):
# NOTE(morganfainberg): Use the configured key_mangler as a first
# choice. Second choice would be the key_mangler defined by the
# backend itself. Finally, fall back to the defaults. The one
# exception is if the backend defines `use_backend_key_mangler`
# as True, which indicates the backend's key_mangler should be
# the first choice.
key_mangler = self._region.backend.key_mangler
if CONF.kvs.enable_key_mangler:
if key_mangler is not None:
msg = _LI('Using %(func)s as KVS region %(name)s key_mangler')
if callable(key_mangler):
self._region.key_mangler = key_mangler
LOG.info(msg, {'func': key_mangler.__name__,
'name': self._region.name})
else:
# NOTE(morganfainberg): We failed to set the key_mangler,
# we should error out here to ensure we aren't causing
# key-length or collision issues.
raise exception.ValidationError(
_('`key_mangler` option must be a function reference'))
else:
msg = _LI('Using default keystone.common.kvs.sha1_mangle_key '
'as KVS region %s key_mangler')
LOG.info(msg, self._region.name)
# NOTE(morganfainberg): Use 'default' keymangler to ensure
# that unless explicitly changed, we mangle keys. This helps
# to limit unintended cases of exceeding cache-key in backends
# such as memcache.
self._region.key_mangler = sha1_mangle_key
self._set_keymangler_on_backend(self._region.key_mangler)
else:
LOG.info(_LI('KVS region %s key_mangler disabled.'),
self._region.name)
self._set_keymangler_on_backend(None)
def _configure_region(self, backend, **config_args):
prefix = CONF.kvs.config_prefix
conf_dict = {}
conf_dict['%s.backend' % prefix] = backend
if 'distributed_lock' not in config_args:
config_args['distributed_lock'] = True
config_args['lock_timeout'] = self._lock_timeout
# NOTE(morganfainberg): To mitigate race conditions on comparing
# the timeout and current time on the lock mutex, we are building
# in a static 1 second overlap where the lock will still be valid
# in the backend but not from the perspective of the context
# manager. Since we must develop to the lowest-common-denominator
# when it comes to the backends, memcache's cache store is not more
# refined than 1 second, therefore we must build in at least a 1
# second overlap. `lock_timeout` of 0 means locks never expire.
if config_args['lock_timeout'] > 0:
config_args['lock_timeout'] += LOCK_WINDOW
for argument, value in config_args.items():
arg_key = '.'.join([prefix, 'arguments', argument])
conf_dict[arg_key] = value
LOG.debug('KVS region configuration for %(name)s: %(config)r',
{'name': self._region.name, 'config': conf_dict})
self._region.configure_from_config(conf_dict, '%s.' % prefix)
def _mutex(self, key):
return self._lock_registry.get(key)
def _create_mutex(self, key):
mutex = self._region.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper(lock_timeout=self._lock_timeout)
class _LockWrapper(object):
"""weakref-capable threading.Lock wrapper."""
def __init__(self, lock_timeout):
self.lock = threading.Lock()
self.lock_timeout = lock_timeout
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def get(self, key):
"""Get a single value from the KVS backend."""
self._assert_configured()
value = self._region.get(key)
if value is NO_VALUE:
raise exception.NotFound(target=key)
return value
def get_multi(self, keys):
"""Get multiple values in a single call from the KVS backend."""
self._assert_configured()
values = self._region.get_multi(keys)
not_found = []
for index, key in enumerate(keys):
if values[index] is NO_VALUE:
not_found.append(key)
if not_found:
# NOTE(morganfainberg): If any of the multi-get values are non-
# existent, we should raise a NotFound error to mimic the .get()
# method's behavior. In all cases the internal dogpile NO_VALUE
# should be masked from the consumer of the KeyValueStore.
raise exception.NotFound(target=not_found)
return values
def set(self, key, value, lock=None):
"""Set a single value in the KVS backend."""
self._assert_configured()
with self._action_with_lock(key, lock):
self._region.set(key, value)
def set_multi(self, mapping):
"""Set multiple key/value pairs in the KVS backend at once.
Like delete_multi, this call does not serialize through the
KeyValueStoreLock mechanism (locking cannot occur on more than one
key in a given context without significant deadlock potential).
"""
self._assert_configured()
self._region.set_multi(mapping)
def delete(self, key, lock=None):
"""Delete a single key from the KVS backend.
This method will raise NotFound if the key doesn't exist. The get and
delete are done in a single transaction (via KeyValueStoreLock
mechanism).
"""
self._assert_configured()
with self._action_with_lock(key, lock):
self.get(key)
self._region.delete(key)
def delete_multi(self, keys):
"""Delete multiple keys from the KVS backend in a single call.
Like set_multi, this call does not serialize through the
KeyValueStoreLock mechanism (locking cannot occur on more than one
key in a given context without significant deadlock potential).
"""
self._assert_configured()
self._region.delete_multi(keys)
def get_lock(self, key):
"""Get a write lock on the KVS value referenced by `key`.
The ability to get a context manager to pass into the set/delete
methods allows for a single-transaction to occur while guaranteeing the
backing store will not change between the start of the 'lock' and the
end. Lock timeout is fixed to the KeyValueStore configured lock
timeout.
"""
self._assert_configured()
return KeyValueStoreLock(self._mutex(key), key, self.locking,
self._lock_timeout)
@contextlib.contextmanager
def _action_with_lock(self, key, lock=None):
"""Wrapper context manager.
Validates and handles the lock and lock timeout if passed in.
"""
if not isinstance(lock, KeyValueStoreLock):
# NOTE(morganfainberg): Locking only matters if a lock is passed in
# to this method. If lock isn't a KeyValueStoreLock, treat this as
# if no locking needs to occur.
yield
else:
if not lock.key == key:
raise ValueError(_('Lock key must match target key: %(lock)s '
'!= %(target)s') %
{'lock': lock.key, 'target': key})
if not lock.active:
raise exception.ValidationError(_('Must be called within an '
'active lock context.'))
if not lock.expired:
yield
else:
raise LockTimeout(target=key)
class KeyValueStoreLock(object):
"""Basic KeyValueStoreLock context manager.
Hooks into the dogpile.cache backend mutex allowing for distributed locking
on resources. This is only a write lock, and will not prevent reads from
occurring.
"""
def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
self.mutex = mutex
self.key = key
self.enabled = locking_enabled
self.lock_timeout = lock_timeout
self.active = False
self.acquire_time = 0
def acquire(self):
if self.enabled:
self.mutex.acquire()
LOG.debug('KVS lock acquired for: %s', self.key)
self.active = True
self.acquire_time = time.time()
return self
__enter__ = acquire
@property
def expired(self):
if self.lock_timeout:
calculated = time.time() - self.acquire_time + LOCK_WINDOW
return calculated > self.lock_timeout
else:
return False
def release(self):
if self.enabled:
self.mutex.release()
if not self.expired:
LOG.debug('KVS lock released for: %s', self.key)
else:
LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
self.key)
def __exit__(self, exc_type, exc_val, exc_tb):
"""Release the lock."""
self.release()
def get_key_value_store(name, kvs_region=None):
"""Retrieve key value store.
Instantiate a new :class:`.KeyValueStore` or return a previous
instantiation that has the same name.
"""
global KEY_VALUE_STORE_REGISTRY
_register_backends()
key_value_store = KEY_VALUE_STORE_REGISTRY.get(name)
if key_value_store is None:
if kvs_region is None:
kvs_region = region.make_region(name=name)
key_value_store = KeyValueStore(kvs_region)
KEY_VALUE_STORE_REGISTRY[name] = key_value_store
return key_value_store
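# Illustrative usage sketch (not part of the original module).  The backend
# short name is a placeholder -- it must be one of the dogpile backends
# registered through CONF.kvs.backends -- and a loaded oslo.config
# configuration is required before any of this can run:
#
#   kvs = get_key_value_store('example-store')
#   kvs.configure('openstack.kvs.SomeRegisteredBackend')
#   with kvs.get_lock('some-key') as lock:
#       kvs.set('some-key', {'value': 42}, lock)
#   value = kvs.get('some-key')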
| 40.263617
| 79
| 0.624858
|
7ce5961a7bf3a4bc4b4d6e2480587b3238d68c96
| 2,675
|
py
|
Python
|
semantic-conventions/src/opentelemetry/semconv/model/utils.py
|
m3dbx/build-tools
|
b21390826c5a59ec10dea2cc68a87252c3638366
|
[
"Apache-2.0"
] | null | null | null |
semantic-conventions/src/opentelemetry/semconv/model/utils.py
|
m3dbx/build-tools
|
b21390826c5a59ec10dea2cc68a87252c3638366
|
[
"Apache-2.0"
] | null | null | null |
semantic-conventions/src/opentelemetry/semconv/model/utils.py
|
m3dbx/build-tools
|
b21390826c5a59ec10dea2cc68a87252c3638366
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from opentelemetry.semconv.model.exceptions import ValidationError
ID_RE = re.compile("([a-z](\\.?[a-z0-9_-]+)+)")
def validate_id(semconv_id, position):
if not ID_RE.fullmatch(semconv_id):
raise ValidationError.from_yaml_pos(
position,
"Invalid id {}. Semantic Convention ids MUST be {}".format(
semconv_id, ID_RE.pattern
),
)
def validate_values(yaml, keys, mandatory=()):
"""This method checks only valid keywords and value types are used"""
unwanted = [k for k in yaml.keys() if k not in keys]
if unwanted:
position = yaml.lc.data[unwanted[0]]
msg = "Invalid keys: {}".format(unwanted)
raise ValidationError.from_yaml_pos(position, msg)
if mandatory:
check_no_missing_keys(yaml, mandatory)
def check_no_missing_keys(yaml, mandatory):
missing = list(set(mandatory) - set(yaml))
if missing:
position = yaml.lc.data[list(yaml)[0]]
msg = "Missing keys: {}".format(missing)
raise ValidationError.from_yaml_pos(position, msg)
class ValidatableYamlNode:
allowed_keys = ()
mandatory_keys = ("id", "brief")
def __init__(self, yaml_node):
self.id = yaml_node.get("id").strip()
self.brief = str(yaml_node.get("brief")).strip()
self._position = [yaml_node.lc.line, yaml_node.lc.col]
@classmethod
def validate_keys(cls, node):
unwanted = [key for key in node.keys() if key not in cls.allowed_keys]
if unwanted:
position = node.lc.data[unwanted[0]]
msg = "Invalid keys: {}".format(unwanted)
raise ValidationError.from_yaml_pos(position, msg)
if cls.mandatory_keys:
check_no_missing_keys(node, cls.mandatory_keys)
def validate_values(self):
"""
Subclasses may provide additional validation.
This method should raise an exception with a descriptive
message if the semantic convention is not valid.
"""
validate_id(self.id, self._position)
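# Illustrative subclass sketch (not part of the original module): a concrete
# node only needs to declare its allowed keys and, optionally, extend
# validate_values().  The key names below are hypothetical.
#
#   class ExampleNode(ValidatableYamlNode):
#       allowed_keys = ("id", "brief", "note")
#
#   ExampleNode.validate_keys(yaml_node)   # raises ValidationError on bad keys
#   node = ExampleNode(yaml_node)
#   node.validate_values()                 # checks the id against ID_RE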
| 33.024691
| 78
| 0.66243
|
9321c8a0f7b6c27a126c53eaa217cb2fc8bfcf3f
| 1,048
|
py
|
Python
|
app/models.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
app/models.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
app/models.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
from sqlalchemy import inspect
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
def __repr__(self):
return f'<User {self.username}>'
class ModelToDictMixin:
def to_dict(self, fields = None):
inst = inspect(self.__class__)
fields = fields or [c_attr.key for c_attr in inst.mapper.column_attrs]
as_dict = {}
for field in fields:
as_dict[field] = getattr(self, field)
return as_dict
class Location(ModelToDictMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
search_query = db.Column(db.String(512), unique=True)
formatted_query = db.Column(db.String(256))
latitude = db.Column(db.Float(10.7))
longitude = db.Column(db.Float(10.7))
def __repr__(self):
return f'<Location {self.formatted_query}>'
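# Illustrative use of the mixin above (not part of the original module):
# to_dict() serializes every mapped column unless an explicit field list is
# given.
#
#   loc = Location(search_query='seattle', formatted_query='Seattle, WA',
#                  latitude=47.6062, longitude=-122.3321)
#   loc.to_dict(['search_query', 'latitude'])
#   # -> {'search_query': 'seattle', 'latitude': 47.6062}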
| 28.324324
| 78
| 0.65458
|
eb6ce2e7028560da8e0bc8cb6de0479abe264d6b
| 511
|
py
|
Python
|
python3/convert_to_base7.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | 1
|
2020-10-08T09:17:40.000Z
|
2020-10-08T09:17:40.000Z
|
python3/convert_to_base7.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
python3/convert_to_base7.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
"""
Space : O(1)
Time : O(n)
"""
class Solution:
def convertToBase7(self, num: int) -> str:
ans = ''
multi = 1
isNeg = num < 0
num = abs(num)
while multi * 7 <= num:
multi *= 7
while multi >= 1:
ans += str(num // multi)
if num >= 0:
num = int(num % multi)
else:
ans += str(num)
multi //= 7
if isNeg:
ans = '-' + ans
return ans
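# Quick sanity check of the conversion above (worked by hand: 100 = 2*49 + 0*7 + 2).
if __name__ == "__main__":
    s = Solution()
    print(s.convertToBase7(100))   # "202"
    print(s.convertToBase7(-7))    # "-10"
    print(s.convertToBase7(0))     # "0"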
| 17.033333
| 46
| 0.369863
|
b657ba3d09f7194dcdbae22eb1b695209990959f
| 3,564
|
py
|
Python
|
tensorflow/basic-rl/tutorial2/mouse.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1
|
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
tensorflow/basic-rl/tutorial2/mouse.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
tensorflow/basic-rl/tutorial2/mouse.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1
|
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
import cellular
import qlearn
import time
import random
import shelve
directions = 8
def pickRandomLocation():
while 1:
x = random.randrange(world.width)
y = random.randrange(world.height)
cell = world.getCell(x, y)
if not (cell.wall or len(cell.agents) > 0):
return cell
class Cell(cellular.Cell):
wall = False
def colour(self):
if self.wall:
return 'black'
else:
return 'white'
def load(self, data):
if data == 'X':
self.wall = True
else:
self.wall = False
class Cat(cellular.Agent):
cell = None
score = 0
colour = 'orange'
def update(self):
cell = self.cell
if cell != mouse.cell:
self.goTowards(mouse.cell)
while cell == self.cell:
self.goInDirection(random.randrange(directions))
class Cheese(cellular.Agent):
colour = 'yellow'
def update(self):
pass
class Mouse(cellular.Agent):
colour = 'gray'
def __init__(self):
self.ai = None
self.ai = qlearn.QLearn(actions=range(directions),
alpha=0.1, gamma=0.9, epsilon=0.1)
self.eaten = 0
self.fed = 0
self.lastState = None
self.lastAction = None
def update(self):
state = self.calcState()
reward = -1
if self.cell == cat.cell:
self.eaten += 1
reward = -100
if self.lastState is not None:
self.ai.learn(self.lastState, self.lastAction, reward, state)
self.lastState = None
self.cell = pickRandomLocation()
return
if self.cell == cheese.cell:
self.fed += 1
reward = 50
cheese.cell = pickRandomLocation()
if self.lastState is not None:
self.ai.learn(self.lastState, self.lastAction, reward, state)
state = self.calcState()
action = self.ai.chooseAction(state)
self.lastState = state
self.lastAction = action
self.goInDirection(action)
def calcState(self):
if cat.cell is not None:
return self.cell.x, self.cell.y, cat.cell.x, cat.cell.y, cheese.cell.x, cheese.cell.y
else:
return self.cell.x, self.cell.y, cheese.cell.x, cheese.cell.y
mouse = Mouse()
cat = Cat()
cheese = Cheese()
world = cellular.World(Cell, directions=directions, filename='barrier2.txt')
world.age = 0
world.addAgent(cheese, cell=pickRandomLocation())
world.addAgent(cat)
world.addAgent(mouse)
epsilonx = (0,100000)
epsilony = (0.1,0)
epsilonm = (epsilony[1] - epsilony[0]) / (epsilonx[1] - epsilonx[0])
endAge = world.age + 150000
while world.age < endAge:
world.update()
if world.age % 100 == 0:
mouse.ai.epsilon = (epsilony[0] if world.age < epsilonx[0] else
epsilony[1] if world.age > epsilonx[1] else
epsilonm*(world.age - epsilonx[0]) + epsilony[0])
if world.age % 10000 == 0:
        print("{:d}, e: {:0.2f}, W: {:d}, L: {:d}"
              .format(world.age, mouse.ai.epsilon, mouse.fed, mouse.eaten))
mouse.eaten = 0
mouse.fed = 0
mouse.ai.epsilon = 0.0  # exploration is turned off here after training; set above 0 to keep exploring
world.display.activate(size=30)
world.display.delay = 1
while 1:
world.update()
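# Note on the epsilon schedule above: with epsilonx = (0, 100000) and
# epsilony = (0.1, 0), the exploration rate decays linearly from 0.1 at
# world.age == 0 to 0 at world.age == 100000 and is clamped outside that
# range.  For example, at world.age == 50000:
#   epsilon = epsilonm * (50000 - 0) + 0.1 = (-0.1 / 100000) * 50000 + 0.1 = 0.05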
| 25.826087
| 98
| 0.549383
|
54019b648aff6d4cd7e77d3edc8b003b3821971e
| 72
|
py
|
Python
|
scrmbl/__init__.py
|
etienne-napoleone/scrmbl
|
e98fe2bfd378e703cfaaa7f1b9763c85d0b8177d
|
[
"MIT"
] | 95
|
2018-10-05T19:52:53.000Z
|
2021-10-04T14:59:55.000Z
|
scrmbl/__init__.py
|
etienne-napoleone/scrmbl
|
e98fe2bfd378e703cfaaa7f1b9763c85d0b8177d
|
[
"MIT"
] | 7
|
2018-10-05T16:36:43.000Z
|
2018-10-07T13:41:52.000Z
|
scrmbl/__init__.py
|
etienne-napoleone/scrmbl
|
e98fe2bfd378e703cfaaa7f1b9763c85d0b8177d
|
[
"MIT"
] | 5
|
2018-10-06T06:50:06.000Z
|
2018-10-11T16:29:49.000Z
|
from scrmbl.main import echo
__all__ = ['echo']
__version__ = '1.0.0'
| 12
| 28
| 0.680556
|
d7a9d65acac0e2b4338116ee29adf67e2ed46683
| 6,188
|
py
|
Python
|
models/controller.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
models/controller.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
models/controller.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
def default_controller(car):
car.set_acceleration(0)
def accelerating_controller(car):
car.set_acceleration(car.maximum_acceleration)
# Returns the collision point that is closest to the car that owns the controller,
# the data of the car that is closest to that point and the ticks that car takes to pass that point
def get_closest_car(car):
from utils.utils import get_distance, collision_points # car_intention, car_lane, follow_intention, follow_lane
from functions.car_functions import ticks_to_pass_point
following_list = car.get_following_cars()
# get points that are possible collision points
possible_points = []
intention = car.get_intention()
lane = car.get_lane()
pos_x, pos_y = car.get_position()
for key in following_list:
following_car = following_list[key]
if None in list(following_car.values()):
continue
following_intention = following_car['intention']
following_lane = following_car['lane']
p_set = set(possible_points)
f_set = collision_points(car_intention=intention, car_lane=lane, follow_intention=following_intention,
follow_lane=following_lane)
common_points = set(f_set) - p_set
possible_points = possible_points + list(common_points)
possible_points = sorted(possible_points, key=lambda p: get_distance(p[0], p[1], pos_x, pos_y))
closest_point = None
if possible_points:
closest_point = possible_points[0]
closest_car = {}
closest_car_distance = None
inner_rectangle = car.inner_intersection_rectangle
passed = False
for key in following_list:
following_car = following_list[key]
if None in list(following_car.values()):
continue
following_intention = following_car['intention']
following_lane = following_car['lane']
collisions = collision_points(car_intention=intention, car_lane=lane,
follow_intention=following_intention, follow_lane=following_lane)
if closest_point in collisions:
following_x = following_car['pos_x']
            following_y = following_car['pos_y']
following_origin_x = following_car['origin_x']
following_origin_y = following_car['origin_y']
car_length = following_car['car_length']
following_speed = following_car['speed']
following_acc_rate = following_car['acceleration']
following_actual_direction = following_car['direction']
following_origin_direction = following_car['origin_direction']
ticks_distance, passed_temp = ticks_to_pass_point(point=closest_point, speed=following_speed,
actual_direction=following_actual_direction,
origin_direction=following_origin_direction,
origin_x=following_origin_x, origin_y=following_origin_y,
acc_rate=following_acc_rate, lane=following_lane,
intention=following_intention,
inner_rectangle=inner_rectangle,
pos_x=following_x, pos_y=following_y, extra_distance=car_length)
if closest_car_distance is None or closest_car_distance > ticks_distance:
closest_car = following_list[key]
closest_car_distance = ticks_distance
passed = passed_temp
return closest_car, closest_car_distance, closest_point, passed
def algorithm_controller(car):
from functions.car_functions import ticks_to_reach_point, collision_free_acceleration, passed_point
following_list = car.get_following_cars()
if not following_list:
car.set_controller(accelerating_controller)
car.get_controller()(car)
else:
car_length = car.get_car_length()
speed = car.get_speed()
acc_rate = car.get_acceleration()
actual_direction = car.get_direction()
origin_direction = car.get_origin_direction()
lane = car.get_lane()
intention = car.get_intention()
inner_rectangle = car.inner_intersection_rectangle
pos_x, pos_y = car.get_position()
origin_x = car.get_origin_x_coordinate()
origin_y = car.get_origin_y_coordinate()
closest_car, closest_car_ticks_distance, closest_point, passed = get_closest_car(car)
ticks_in_three_seconds = 3 * (1 / (car.get_time_step() * car.get_speed_factor()))
if closest_point is None:
return
ticks_distance, reached = ticks_to_reach_point(point=closest_point, speed=speed, acc_rate=acc_rate,
actual_direction=actual_direction,
origin_direction=origin_direction, lane=lane, intention=intention,
inner_rectangle=inner_rectangle, pos_x=pos_x, pos_y=pos_y,
origin_x=origin_x, origin_y=origin_y, extra_distance=car_length)
if closest_car_ticks_distance == -1:
if passed:
                print(car.get_name(), 'already passed the collision point')
car.set_controller(accelerating_controller)
car.get_controller()(car)
else:
car.set_acceleration(car.minimum_acceleration)
# could make it advance to the edge of the intersection if the car isn't inside it yet
elif ticks_distance - ticks_in_three_seconds <= closest_car_ticks_distance:
new_acceleration = collision_free_acceleration(closest_car_ticks_distance, closest_point, speed, pos_x,
pos_y)
car.set_acceleration(new_acceleration)
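# Worked example of the safety-window check in algorithm_controller above
# (illustrative numbers only): with time_step = 0.1 and speed_factor = 1,
# ticks_in_three_seconds = 3 * (1 / 0.1) = 30 ticks.  If this car needs
# ticks_distance = 50 ticks to reach the shared collision point while the
# closest following car needs 25 ticks to clear it, then 50 - 30 = 20 <= 25,
# so a collision-free acceleration is computed instead of simply accelerating
# through the intersection.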
| 54.280702
| 127
| 0.62201
|
73ef095d29f49f6a97bd70763431b4c5cc223ee4
| 1,229
|
py
|
Python
|
account/models.py
|
sajib1066/blog
|
d53a642cdda8c501d9df85e1cda17362fb098fff
|
[
"MIT"
] | 1
|
2021-04-29T06:02:18.000Z
|
2021-04-29T06:02:18.000Z
|
account/models.py
|
sajib1066/blog
|
d53a642cdda8c501d9df85e1cda17362fb098fff
|
[
"MIT"
] | 4
|
2020-07-14T15:55:31.000Z
|
2022-01-13T02:42:31.000Z
|
account/models.py
|
sajib1066/blog
|
d53a642cdda8c501d9df85e1cda17362fb098fff
|
[
"MIT"
] | 3
|
2020-07-27T02:16:40.000Z
|
2021-06-15T08:56:56.000Z
|
from django.db import models
from django.contrib.auth.models import User
from sajibnotes.helper import get_current_user
class Profile(models.Model):
current_user = get_current_user
user = models.OneToOneField(User, on_delete=models.CASCADE, default=current_user)
email = models.EmailField(unique=True)
name = models.CharField(max_length=120)
address = models.TextField()
photo = models.ImageField(upload_to='profile/')
title = models.CharField(max_length=240)
about = models.TextField()
phone_number = models.CharField(max_length=14, unique=True)
gender_choice = (
('male', 'Male'),
('female', 'Female')
)
gender = models.CharField(choices=gender_choice, max_length=6)
facebook = models.URLField(blank=True, null=True)
twitter = models.URLField(blank=True, null=True)
instagram = models.URLField(blank=True, null=True)
linkedin = models.URLField(blank=True, null=True)
website = models.URLField(blank=True, null=True)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class NewsLetter(models.Model):
email = models.EmailField(unique=True)
def __str__(self):
return self.email
| 35.114286
| 85
| 0.714402
|
38c06040dab0be5b49d30b50476c50de81dbb58a
| 2,053
|
py
|
Python
|
gemben/utils/evaluation_util.py
|
Sujit-O/gemben
|
4577914dbe4b39559093a6e9517c666b8e69c052
|
[
"BSD-3-Clause"
] | 1
|
2019-09-14T01:58:54.000Z
|
2019-09-14T01:58:54.000Z
|
gemben/utils/evaluation_util.py
|
Sujit-O/gem
|
4577914dbe4b39559093a6e9517c666b8e69c052
|
[
"BSD-3-Clause"
] | 6
|
2020-01-28T22:52:02.000Z
|
2022-02-10T00:25:50.000Z
|
gemben/utils/evaluation_util.py
|
Sujit-O/gem
|
4577914dbe4b39559093a6e9517c666b8e69c052
|
[
"BSD-3-Clause"
] | 1
|
2021-03-23T04:59:51.000Z
|
2021-03-23T04:59:51.000Z
|
import numpy as np
from random import randint
def getRandomEdgePairs(node_num, sample_ratio=0.01, is_undirected=True):
"""Function to get random edge pairs."""
num_pairs = int(sample_ratio * node_num * (node_num - 1))
if is_undirected:
        num_pairs = num_pairs // 2
current_sets = set()
while(len(current_sets) < num_pairs):
        p = (randint(0, node_num - 1), randint(0, node_num - 1))
if(p in current_sets):
continue
if(is_undirected and (p[1], p[0]) in current_sets):
continue
current_sets.add(p)
return list(current_sets)
def getEdgeListFromAdjMtx(adj, threshold=0.0, is_undirected=True, edge_pairs=None):
    """Function to get edgelist from adjacency matrix."""
result = []
node_num = adj.shape[0]
if edge_pairs:
for (st, ed) in edge_pairs:
if adj[st, ed] >= threshold:
result.append((st, ed, adj[st, ed]))
else:
for i in range(node_num):
for j in range(node_num):
if(j == i):
continue
if(is_undirected and i >= j):
continue
if adj[i, j] > threshold:
result.append((i, j, adj[i, j]))
return result
def splitDiGraphToTrainTest(di_graph, train_ratio, is_undirected=True):
"""Function to split the directed graph to train and test sets."""
train_digraph = di_graph.copy()
test_digraph = di_graph.copy()
node_num = di_graph.number_of_nodes()
for (st, ed, w) in di_graph.edges.data('weight', default=1):
if(is_undirected and st >= ed):
continue
if(np.random.uniform() <= train_ratio):
test_digraph.remove_edge(st, ed)
if(is_undirected):
test_digraph.remove_edge(ed, st)
else:
train_digraph.remove_edge(st, ed)
if(is_undirected):
train_digraph.remove_edge(ed, st)
return (train_digraph, test_digraph)
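# Illustrative usage (not part of the original module).  splitDiGraphToTrainTest
# expects a networkx-style DiGraph; undirected graphs are assumed to be stored
# with both edge directions present:
#
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_edge(0, 1, weight=1.0)
#   g.add_edge(1, 0, weight=1.0)
#   train, test = splitDiGraphToTrainTest(g, train_ratio=0.8, is_undirected=True)
#   edges = getEdgeListFromAdjMtx(nx.to_numpy_array(g), threshold=0.5)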
| 35.396552
| 84
| 0.575256
|
23f9353ceaccaff71ff09f50e94cdda4aa10d612
| 1,706
|
py
|
Python
|
setup.py
|
JohannesSMHI/ctdpy
|
cae02aceb2e3b6e52cdb580eb26cc7d0485b40e7
|
[
"MIT"
] | 1
|
2018-11-16T13:58:29.000Z
|
2018-11-16T13:58:29.000Z
|
setup.py
|
JohannesSMHI/ctdpy
|
cae02aceb2e3b6e52cdb580eb26cc7d0485b40e7
|
[
"MIT"
] | null | null | null |
setup.py
|
JohannesSMHI/ctdpy
|
cae02aceb2e3b6e52cdb580eb26cc7d0485b40e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Created on Tue Sep 11 08:05:22 2018
@author: a002028
"""
import os
import setuptools
requirements = []
with open('requirements.txt', 'r') as fh:
for line in fh:
requirements.append(line.strip())
NAME = 'ctdpy'
VERSION = '0.1.7'
README = open('READMEpypi.rst', 'r').read()
setuptools.setup(
name=NAME,
version=VERSION,
author="Johannes Johansson",
author_email="johannes.johansson@smhi.se",
description="Package to handle CTD data",
long_description=README,
    long_description_content_type="text/x-rst",
url="https://github.com/sharksmhi/ctdpy",
packages=setuptools.find_packages(),
package_data={'ctdpy': [os.path.join('core', 'etc', '*.json'),
os.path.join('core', 'etc', 'readers', '*.yaml'),
os.path.join('core', 'etc', 'writers', '*.yaml'),
os.path.join('core', 'etc', 'templates', '*.yaml'),
os.path.join('templates', '*.xlsx'),
os.path.join('templates', '*.txt'),
os.path.join('templates', 'archive_structure', '*.txt'),
os.path.join('templates', 'archive_structure', 'processed_data', '*.txt'),
os.path.join('templates', 'archive_structure', 'received_data'),
]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=requirements,
)
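# Example (assumption, not part of the original file): build a source distribution
# and install it locally:
#
#     python setup.py sdist
#     pip install dist/ctdpy-0.1.7.tar.gz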
| 34.816327
| 102
| 0.543962
|
72390e33af86505d5f9bb1b2481b32bf80d2945e
| 1,808
|
py
|
Python
|
collections/trie2.py
|
gongchengshi/python_common
|
a68ab61e03c433a33f16119910e1e73e848ea824
|
[
"MIT"
] | null | null | null |
collections/trie2.py
|
gongchengshi/python_common
|
a68ab61e03c433a33f16119910e1e73e848ea824
|
[
"MIT"
] | null | null | null |
collections/trie2.py
|
gongchengshi/python_common
|
a68ab61e03c433a33f16119910e1e73e848ea824
|
[
"MIT"
] | 1
|
2018-09-19T00:33:35.000Z
|
2018-09-19T00:33:35.000Z
|
from collections import defaultdict
class Trie:
"""Taken from http://en.wikipedia.org/wiki/Trie"""
def __init__(self):
self.root = defaultdict(Trie)
self.value = None
def add(self, s, value):
"""Add the string `s` to the
`Trie` and map it to the given value."""
head, tail = s[0], s[1:]
cur_node = self.root[head]
if not tail:
cur_node.value = value
return # No further recursion
cur_node.add(tail, value)
def lookup(self, s, default=None):
"""Look up the value corresponding to
the string `s`. Expand the trie to cache the search."""
head, tail = s[0], s[1:]
node = self.root[head]
if tail:
return node.lookup(tail, default)  # propagate the default down the recursion
return node.value or default
def remove(self, s):
"""Remove the string s from the Trie.
Returns *True* if the string was a member."""
head, tail = s[0], s[1:]
if head not in self.root:
return False # Not contained
node = self.root[head]
if tail:
return node.remove(tail)
else:
del self.root[head]  # actually detach the child node from this trie
return True
def prefix(self, s):
"""Check whether the string `s` is a prefix
of some member. Don't expand the trie on negatives (cf.lookup)"""
if not s:
return True
head, tail = s[0], s[1:]
if head not in self.root:
return False # Not contained
node = self.root[head]
return node.prefix(tail)
def items(self):
    """Return an iterator over the values stored in the `Trie`."""
    for char, node in self.root.items():
        if node.value is None:
            yield from node.items()
        else:
            yield node.value
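# A brief usage sketch for the Trie above; the words and values are illustrative only:
if __name__ == '__main__':
    t = Trie()
    t.add('to', 1)
    t.add('tea', 2)
    t.add('ten', 3)
    print(t.lookup('tea'))         # 2
    print(t.lookup('te', 'n/a'))   # 'n/a' -- 'te' is only a prefix
    print(t.prefix('te'))          # True
    print(sorted(t.items()))       # [1, 2, 3]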
| 30.133333
| 73
| 0.538717
|
23c40fc67aead9a7b6e929d6ea8dab8e7b99977d
| 14,249
|
py
|
Python
|
glue/core/component.py
|
tiagopereira/glue
|
85bf7ce2d252d7bc405e8160b56fc83d46b9cbe4
|
[
"BSD-3-Clause"
] | 1
|
2019-12-17T07:58:35.000Z
|
2019-12-17T07:58:35.000Z
|
glue/core/component.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | null | null | null |
glue/core/component.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | 1
|
2019-08-04T14:10:12.000Z
|
2019-08-04T14:10:12.000Z
|
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import pandas as pd
from glue.utils import (shape_to_string, coerce_numeric,
broadcast_to, categorical_ndarray)
__all__ = ['Component', 'DerivedComponent', 'CategoricalComponent',
'CoordinateComponent', 'DateTimeComponent']
class Component(object):
""" Stores the actual, numerical information for a particular quantity
Data objects hold one or more components, accessed via
ComponentIDs. All Components in a data set must have the same
shape and number of dimensions
Notes
-----
Instead of instantiating Components directly, consider using
:meth:`Component.autotyped`, which chooses a subclass most appropriate
for the data type.
"""
def __init__(self, data, units=None):
"""
:param data: The data to store
:type data: :class:`numpy.ndarray`
:param units: Optional unit label
:type units: str
"""
# The physical units of the data
self.units = units
# The actual data
# subclasses may pass non-arrays here as placeholders.
if isinstance(data, np.ndarray):
if data.dtype.kind == 'M':
raise TypeError('DateTimeComponent should be used instead of Component for np.datetime64 arrays')
data = coerce_numeric(data)
data.setflags(write=False) # data is read-only
self._data = data
@property
def units(self):
return self._units
@units.setter
def units(self, value):
if value is None:
self._units = ''
else:
self._units = str(value)
@property
def data(self):
""" The underlying :class:`numpy.ndarray` """
return self._data
@property
def shape(self):
""" Tuple of array dimensions """
return self._data.shape
@property
def ndim(self):
""" The number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
logging.debug("Using %s to index data of shape %s", key, self.shape)
return self._data[key]
@property
def numeric(self):
"""
Whether or not the datatype is numeric
"""
# We need to be careful here to not just access self.data since that
# would force the computation of the whole component in the case of
# derived components, so instead we specifically only get the first
# element.
return np.can_cast(self[(0,) * self.ndim].dtype, complex)  # np.complex is deprecated; the builtin behaves the same here
@property
def categorical(self):
"""
Whether or not the datatype is categorical
"""
return False
@property
def datetime(self):
"""
Whether or not the datatype is a date/time
"""
return False
def __str__(self):
return "%s with shape %s" % (self.__class__.__name__, shape_to_string(self.shape))
def jitter(self, method=None):
raise NotImplementedError
def to_series(self, **kwargs):
""" Convert into a pandas.Series object.
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self.data.ravel(), **kwargs)
@classmethod
def autotyped(cls, data, units=None):
"""
Automatically choose between Component and CategoricalComponent,
based on the input data type.
:param data: The data to pack into a Component (array-like)
:param units: Optional units
:type units: str
:returns: A Component (or subclass)
"""
data = np.asarray(data)
if np.issubdtype(data.dtype, np.object_):
return CategoricalComponent(data, units=units)
if data.dtype.kind == 'M':
return DateTimeComponent(data)
n = coerce_numeric(data)
thresh = 0.5
try:
use_categorical = np.issubdtype(data.dtype, np.character) and \
np.isfinite(n).mean() <= thresh
except TypeError: # isfinite not supported. non-numeric dtype
use_categorical = True
if use_categorical:
return CategoricalComponent(data, units=units)
else:
return Component(n, units=units)
class DerivedComponent(Component):
""" A component which derives its data from a function """
def __init__(self, data, link, units=None):
"""
:param data: The data object to use for calculation
:type data: :class:`~glue.core.data.Data`
:param link: The link that carries out the function
:type link: :class:`~glue.core.component_link.ComponentLink`
:param units: Optional unit description
"""
super(DerivedComponent, self).__init__(data, units=units)
self._link = link
def set_parent(self, data):
""" Reassign the Data object that this DerivedComponent operates on """
self._data = data
@property
def data(self):
""" Return the numerical data as a numpy array """
return self._link.compute(self._data)
@property
def link(self):
""" Return the component link """
return self._link
def __getitem__(self, key):
return self._link.compute(self._data, key)
class CoordinateComponent(Component):
"""
Components associated with pixel or world coordinates
The numerical values are computed on the fly.
"""
def __init__(self, data, axis, world=False):
super(CoordinateComponent, self).__init__(None, None)
self.world = world
self._data = data
self.axis = axis
@property
def data(self):
return self._calculate()
def _calculate(self, view=None):
if self.world:
# Calculating the world coordinates can be a bottleneck if we aren't
# careful, so we need to make sure that if not all dimensions depend
# on each other, we use smart broadcasting.
# The unoptimized way to do this for an N-dimensional dataset would
# be to construct N-dimensional arrays of pixel values for each
# coordinate. However, if we are computing the coordinates for axis
# i, and axis i is not dependent on any other axis, then the result
# will be an N-dimensional array where the same 1D array of
# coordinates will be repeated over and over.
# To optimize this, we therefore essentially consider only the
# dependent dimensions and then broadcast the result to the full
# array size at the very end.
# view=None actually adds a dimension which is never what we really
# mean, at least in glue.
if view is None:
view = Ellipsis
# If the view is a tuple or list of arrays, we should actually just
# convert these straight to world coordinates since the indices
# of the pixel coordinates are the pixel coordinates themselves.
if isinstance(view, (tuple, list)) and isinstance(view[0], np.ndarray):
axis = self._data.ndim - 1 - self.axis
return self._data.coords.pixel2world_single_axis(*view[::-1], axis=axis)
# For 1D arrays, slice can be given as a single slice but we need
# to wrap it in a list to make the following code work correctly,
# as it is then consistent with higher-dimensional cases.
if isinstance(view, slice) or np.isscalar(view):
view = [view]
# Some views, e.g. with lists of integer arrays, can give arbitrarily
# complex (copied) subsets of arrays, so in this case we don't do any
# optimization
if view is Ellipsis:
optimize_view = False
else:
for v in view:
if not np.isscalar(v) and not isinstance(v, slice):
optimize_view = False
break
else:
optimize_view = True
pix_coords = []
dep_coords = self._data.coords.dependent_axes(self.axis)
final_slice = []
final_shape = []
for i in range(self._data.ndim):
if optimize_view and i < len(view) and np.isscalar(view[i]):
final_slice.append(0)
else:
final_slice.append(slice(None))
# We set up a 1D pixel axis along that dimension.
pix_coord = np.arange(self._data.shape[i])
# If a view was specified, we need to take it into account for
# that axis.
if optimize_view and i < len(view):
pix_coord = pix_coord[view[i]]
if not np.isscalar(view[i]):
final_shape.append(len(pix_coord))
else:
final_shape.append(self._data.shape[i])
if i not in dep_coords:
# The axis is not dependent on this instance's axis, so we
# just compute the values once and broadcast along this
# dimension later.
pix_coord = 0
pix_coords.append(pix_coord)
# We build the list of N arrays, one for each pixel coordinate
pix_coords = np.meshgrid(*pix_coords, indexing='ij', copy=False)
# Finally we convert these to world coordinates
axis = self._data.ndim - 1 - self.axis
world_coords = self._data.coords.pixel2world_single_axis(*pix_coords[::-1],
axis=axis)
# We get rid of any dimension for which using the view should get
# rid of that dimension.
if optimize_view:
world_coords = world_coords[tuple(final_slice)]
# We then broadcast the final array back to what it should be
world_coords = broadcast_to(world_coords, tuple(final_shape))
# We apply the view if we weren't able to optimize before
if optimize_view:
return world_coords
else:
return world_coords[view]
else:
slices = [slice(0, s, 1) for s in self.shape]
grids = np.broadcast_arrays(*np.ogrid[slices])
if view is not None:
grids = [g[view] for g in grids]
return grids[self.axis]
@property
def shape(self):
""" Tuple of array dimensions. """
return self._data.shape
@property
def ndim(self):
""" Number of dimensions """
return len(self._data.shape)
def __getitem__(self, key):
return self._calculate(key)
def __lt__(self, other):
if self.world == other.world:
return self.axis < other.axis
return self.world
def __gluestate__(self, context):
return dict(axis=self.axis, world=self.world)
@classmethod
def __setgluestate__(cls, rec, context):
return cls(None, rec['axis'], rec['world'])
@property
def numeric(self):
return True
@property
def categorical(self):
return False
class CategoricalComponent(Component):
"""
Container for categorical data.
"""
def __init__(self, categorical_data, categories=None, jitter=None, units=None):
"""
:param categorical_data: The underlying :class:`numpy.ndarray`
:param categories: List of unique values in the data
:param jitter: Strategy for jittering the data
"""
# TODO: deal with custom categories
super(CategoricalComponent, self).__init__(None, units)
self._data = categorical_ndarray(categorical_data, copy=False, categories=categories)
if self._data.ndim != 1:
raise ValueError("Categorical Data must be 1-dimensional")
self.jitter(method=jitter)
@property
def codes(self):
"""
The index of the category for each value in the array.
"""
return self._data.codes
@property
def labels(self):
"""
The original categorical data.
"""
return self._data.view(np.ndarray)
@property
def categories(self):
"""
The categories.
"""
return self._data.categories
@property
def data(self):
return self._data
@property
def numeric(self):
return False
@property
def categorical(self):
return True
def jitter(self, method=None):
"""
Jitter the codes so the density of points can be easily seen in a
scatter plot for example.
Parameters
----------
method : {None, 'uniform'}
If `None`, no jittering is done (or any existing jittering is undone).
If ``'uniform'``, the codes are randomized by a uniformly
distributed random variable.
"""
self._data.jitter(method=method)
self.jitter_method = method
def to_series(self, **kwargs):
"""
Convert into a pandas.Series object.
This will be converted to a Series with dtype=object!
:param kwargs: All kwargs are passed to the Series constructor.
:return: pandas.Series
"""
return pd.Series(self.labels, dtype=object, **kwargs)
class DateTimeComponent(Component):
"""
A component representing a date/time.
Parameters
----------
data : `~numpy.ndarray`
The data to store, with `~numpy.datetime64` dtype
"""
def __init__(self, data, units=None):
self.units = units
if not isinstance(data, np.ndarray) or data.dtype.kind != 'M':
raise TypeError("DateTimeComponent should be initialized with a datetim64 Numpy array")
self._data = data
@property
def numeric(self):
return True
@property
def datetime(self):
return True
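# A short usage sketch for Component.autotyped (illustrative only; the arrays below are
# assumptions, not part of glue itself):
if __name__ == '__main__':
    print(type(Component.autotyped(np.array([1.0, 2.0, 3.0]))).__name__)                  # Component
    print(type(Component.autotyped(np.array(['a', 'b', 'a']))).__name__)                  # CategoricalComponent
    print(type(Component.autotyped(np.array(['2020-01-01'], dtype='M8[D]'))).__name__)    # DateTimeComponent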
| 30.446581
| 113
| 0.590498
|
b64e20e8fb51503aac0fd35869aa3cc2fc86759f
| 8,366
|
py
|
Python
|
extras/island.py
|
prideout/clumpy
|
8c8b33e8834b96cff071ee048d32903917297e64
|
[
"MIT"
] | 36
|
2018-07-14T16:36:41.000Z
|
2022-03-16T08:15:28.000Z
|
extras/island.py
|
prideout/clumpy
|
8c8b33e8834b96cff071ee048d32903917297e64
|
[
"MIT"
] | 1
|
2022-01-13T01:25:24.000Z
|
2022-01-13T01:25:24.000Z
|
extras/island.py
|
prideout/clumpy
|
8c8b33e8834b96cff071ee048d32903917297e64
|
[
"MIT"
] | 3
|
2018-12-07T11:31:50.000Z
|
2021-05-23T21:38:34.000Z
|
#!/usr/bin/env python3
'''
Creates a movie of infinitely zooming FBM.
All coordinates are X,Y floats with values that increase rightward and downward:
World Space: [0,0 through 1,1] spans the entire island.
Tile Space: [0,0 through 1,1] spans the smallest tile that wholly encompasses the viewport.
Viewport Space: [0,0 through 1,1] spans the current view.
At Z = 0, Tile Space is equivalent to World Space.
Note that numpy requires Row,Col integer coordinates, but we internalize those at the lowest level.
(see sample_pixel)
'''
from os import system
from tqdm import tqdm
import cairo
import imageio
import numpy as np
import scipy.interpolate as interp
def vec2(x, y): return np.array([x, y], dtype=float)
def vec3(x, y, z): return np.array([x, y, z], dtype=float)
def grid(w, h): return np.zeros([int(h), int(w)], dtype=float)
# Configuration.
Resolution = vec2(512,512)
VideoFps = 30
NumFrames = VideoFps * 5
vsTargetLn = vec2([.4,.4], [.9,.9])
vsPanFocus = vec2(0.5, 0.5)
SeaLevel = 0.5
NicePalette = [
000, 0x001070 , # Dark Blue
126, 0x2C5A7C , # Light Blue
127, 0xE0F0A0 , # Yellow
128, 0x5D943C , # Dark Green
160, 0x606011 , # Brown
200, 0xFFFFFF , # White
255, 0xFFFFFF ] # White
# Global data.
NoiseFrequency = 16.0
NumLayers = 4
Width, Height = Resolution
Lut = grid(3, 256)
Zoom = int(0)
vsTargetPt = vec2(-1,-1)
ViewImage = grid(Width, Height) ## Current viewport. Includes NumLayers of high-freq noise.
TileImage = grid(Width, Height) ## Smallest encompassing tile (accumulated low-freq noise).
Viewports = []
def update_view(nlayers = NumLayers):
resample_image(ViewImage, TileImage, Viewports[-1])
seed = Zoom
for vp in Viewports[:nlayers]:
noise = gradient_noise(Resolution, vp, NoiseFrequency, seed=seed)
np.copyto(ViewImage, 2 * (ViewImage + noise))
seed = seed + 1
def update_tile():
global Zoom
global Viewports
global NoiseFrequency
# Render a new base tile by adding one layer of noise.
update_view(1)
np.copyto(TileImage, ViewImage)
# Left-shift the viewports array and push on a new high-frequency layer.
Viewports = Viewports[1:] + [vec2((0,0),(1,1))]
Zoom = Zoom + 1
NoiseFrequency = min(NoiseFrequency * 1.5, 512.0)
def main():
global vsTargetPt
global NoiseFrequency
global NumLayers
NoiseFrequency = 16.0
NumLayers = 4
create_viewports()
np.copyto(Lut, create_palette())
np.copyto(TileImage, create_basetile(Width, Height))
update_view()
np.copyto(TileImage, ViewImage)
NumLayers = 1
create_viewports()
update_view()
vsTargetPt = marching_line(ViewImage, vsTargetLn)
writer = imageio.get_writer('out.mp4', fps=VideoFps, quality=9)
for frame in tqdm(range(NumFrames)):
# Draw the heightmap for the current viewport.
update_view()
# Recompute the point of interest.
vsTargetPt = marching_line(ViewImage, vsTargetLn)
# Draw the overlay and convert the heightmap into color.
rgb = render_view()
writer.append_data(np.uint8(rgb))
# Compute the pan / zoom adjustments for the largest viewport.
vpdelta = shrink_viewport(Viewports[-1], zoom_speed=10, pan_speed=0.05)
# Propagate the movement to all layer viewports.
for vp in reversed(Viewports):
np.copyto(vp, vp + vpdelta)
vpdelta = vpdelta / 2
# If the largest viewport is sufficiently small, it's time to increment zoom.
vp = Viewports[-1]
vpextent = vp[1] - vp[0]
if vpextent[0] < 0.5 and vpextent[1] < 0.5:
update_tile()
writer.close()
def render_view():
lo, hi = np.amin(ViewImage), np.amax(ViewImage)
L1 = Lut[np.uint8(255 * (0.5 + 0.5 * ViewImage / (hi - lo)))]
draw_overlay(L1, vsTargetLn, vsTargetPt)
lo, hi = np.amin(TileImage), np.amax(TileImage)
L2 = Lut[np.uint8(255 * (0.5 + 0.5 * TileImage / (hi - lo)))]
# Crop so that the stack is roughly 1920x1080
crop = Width - Width * 960/1080
w0, w1 = int(crop/2), int(Width - crop/2)
L1 = L1[:, w0:w1, :]
L2 = L2[:, w0:w1, :]
return np.hstack([L1, L2])
def shrink_viewport(viewport, zoom_speed, pan_speed):
vpextent = viewport[1] - viewport[0]
pandir = vsTargetPt - vsPanFocus
pan_delta = pan_speed * pandir
zoom_delta = zoom_speed * vpextent / Resolution
return pan_delta + vec2(zoom_delta, -zoom_delta)
def draw_overlay(dst, lineseg, pxcoord):
dims = [dst.shape[1], dst.shape[0]]
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, dims[0], dims[1])
ctx = cairo.Context(surface)
ctx.scale(dims[0], dims[1])
ctx.set_line_width(0.005)
# Stroke a path along lineseg
ctx.set_source_rgba(1.0, 0.8, 0.8, 0.15)
ctx.move_to(lineseg[0][0], lineseg[0][1])
ctx.line_to(lineseg[1][0], lineseg[1][1])
ctx.stroke()
# Draw circle around pxcoord
ctx.set_source_rgba(1.0, 0.8, 0.8, 0.15)
ctx.save()
ctx.translate(pxcoord[0], pxcoord[1])
ctx.scale(0.02, 0.02)
ctx.arc(0., 0., 1., 0., 2 * np.pi)
ctx.restore()
ctx.stroke()
if False:
f = cairo.ToyFontFace("")
ctx.move_to(.5, .5)
ctx.show_text("Philip")
# Perform composition
buf = surface.get_data()
rgb = np.ndarray(shape=dims[:2], dtype=np.uint32, buffer=buf)
color = np.float32([(rgb >> 8) & 0xff, (rgb >> 16) & 0xff, (rgb >> 0) & 0xff])
color = color.swapaxes(0, 2).swapaxes(0, 1)
a = np.float32((rgb >> 24) & 0xff) / 255.0
alpha = np.array([a, a, a]).swapaxes(0, 2).swapaxes(0, 1)
np.copyto(dst, dst * (1 - alpha) + color)
def clumpy(cmd):
result = system('./clumpy ' + cmd)
if result: raise Exception("clumpy failed with: " + cmd)
def gradient_noise(dims, viewport, frequency, seed):
(left, top), (right, bottom) = 2 * (viewport - 0.5)
args = "{}x{} '{},{},{},{}' {} {}".format(
dims[0], dims[1],
left, -bottom, right, -top,
frequency, seed)
clumpy("gradient_noise " + args)
noise = np.load('gradient_noise.npy')
return noise
def sample_pixel(image_array, x, y):
rows, cols = image_array.shape
row, col = int(y * rows), int(x * cols)
if row < 0 or col < 0 or col >= cols or row >= rows:
return 0
return image_array[row][col]
def marching_line(image_array, line_segment):
x0, y0 = line_segment[0]
x1, y1 = line_segment[1]
val = sample_pixel(image_array, x0, y0)
sgn = np.sign(val)
divs = float(max(Resolution))
dx = (x1 - x0) / divs
dy = (y1 - y0) / divs
for i in range(int(divs)):
x = x0 + i * dx
y = y0 + i * dy
val = sample_pixel(image_array, x, y)
if np.sign(val) != sgn:
return [x, y]
print(f'Could not find in {x0:3.3},{y0:3.3} -- {x1:3.3},{y1:3.3}')
return [0.5,0.5]
# TODO: use NicePalette
def create_palette():
r = np.hstack([np.linspace(0.0, 0.0, num=128), np.linspace(0.0, 0.0, num=128)])
g = np.hstack([np.linspace(0.0, 0.0, num=128), np.linspace(128.0, 255.0, num=128)])
b = np.hstack([np.linspace(128.0, 255.0, num=128), np.linspace(0.0, 64.0, num=128)])
return np.float32([r, g, b]).transpose()
# Hermite interpolation, also known as smoothstep:
# (-1 => 0) (0 => 1) (+1 => 0)
def hermite(t):
return 1 - (3 - 2*np.abs(t))*t*t
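# Quick sanity check (illustrative): hermite(np.array([-1.0, 0.0, 1.0])) -> [0., 1., 0.],
# so create_basetile() peaks at the island centre and falls to zero at the edges.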
def create_basetile(width, height):
rows = hermite([np.linspace(-1.0, 1.0, num=height)])
cols = hermite([np.linspace(-1.0, 1.0, num=width)]).T
return rows * cols - SeaLevel
def resample_image(dst, src, viewport):
height, width = dst.shape
domain = [np.linspace(0, 1, num) for num in (width, height)]
[(left, top), (right, bottom)] = viewport
vrange = np.linspace(left, right, num=width)
urange = np.linspace(top, bottom, num=height)
f = interp.interp1d(domain[0], src, kind='linear', fill_value='extrapolate')
temp = f(vrange)
f = interp.interp1d(domain[1], temp.T, kind='linear', fill_value='extrapolate')
newimg = f(urange).T
np.copyto(dst, newimg)
def create_viewports():
global Viewports
Viewports = []
frequency = 1
for x in range(NumLayers):
vp = vec2((0,0), (frequency,frequency))
Viewports.insert(0, vp)
frequency = frequency / 2
main()
| 32.807843
| 99
| 0.629931
|
e604b994ca3498103ac7cd80a4f5fa33b07d7a39
| 848
|
py
|
Python
|
src/streamlink/__init__.py
|
Billy2011/streamlink
|
5f99ec52e0a9c315aeee00b96287edc45adaccd3
|
[
"BSD-2-Clause"
] | 1
|
2019-09-14T10:19:47.000Z
|
2019-09-14T10:19:47.000Z
|
src/streamlink/__init__.py
|
Billy2011/streamlink
|
5f99ec52e0a9c315aeee00b96287edc45adaccd3
|
[
"BSD-2-Clause"
] | 1
|
2018-07-12T18:18:05.000Z
|
2018-07-12T18:18:05.000Z
|
src/streamlink/__init__.py
|
Billy2011/streamlink
|
5f99ec52e0a9c315aeee00b96287edc45adaccd3
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Streamlink extracts streams from various services.
The main component of Streamlink is a command-line utility that
launches the streams in a video player.
An API is also provided that allows direct access to stream data.
Full documentation is available at https://Billy2011.github.io/streamlink-27.
"""
__version__ = "2.27.3.0-dev"
__version_date__ = "2022-05-16"
__title__ = "streamlink-27"
__license__ = "Simplified BSD"
__author__ = "Streamlink, Billy2011"
__copyright__ = "Copyright 2022 Streamlink, Billy2011"
__credits__ = ["https://github.com/streamlink/streamlink/blob/master/AUTHORS"]
from streamlink.api import streams
from streamlink.exceptions import (StreamlinkError, PluginError, NoStreamsError,
NoPluginError, StreamError)
from streamlink.session import Streamlink
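# A minimal usage sketch of the convenience API re-exported above (the URL is a
# placeholder; which stream names are available depends on the matching plugin):
#
#     from streamlink import streams
#     available = streams("https://example.com/live")  # dict: quality name -> Stream
#     fd = available["best"].open()                     # file-like object
#     chunk = fd.read(1024)
#     fd.close()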
| 35.333333
| 80
| 0.757075
|